xref: /linux/kernel/sched/ext.c (revision 7b8e9264f55a9c320f398e337d215e68cca50131)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4  *
5  * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
6  * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
7  * Copyright (c) 2022 David Vernet <dvernet@meta.com>
8  */
9 #include <linux/btf_ids.h>
10 #include "ext_idle.h"
11 
12 /*
13  * NOTE: sched_ext is in the process of growing multiple scheduler support and
14  * scx_root usage is in a transitional state. Naked dereferences are safe if the
15  * caller is one of the tasks attached to SCX and explicit RCU dereference is
16  * necessary otherwise. Naked scx_root dereferences trigger sparse warnings but
17  * are used as temporary markers to indicate that the dereferences need to be
18  * updated to point to the associated scheduler instances rather than scx_root.
19  */
20 static struct scx_sched __rcu *scx_root;
21 
22 /*
23  * During exit, a task may schedule after losing its PIDs. When disabling the
24  * BPF scheduler, we need to be able to iterate tasks in every state to
25  * guarantee system safety. Maintain a dedicated task list which contains every
26  * task between its fork and eventual free.
27  */
28 static DEFINE_RAW_SPINLOCK(scx_tasks_lock);
29 static LIST_HEAD(scx_tasks);
30 
31 /* ops enable/disable */
32 static DEFINE_MUTEX(scx_enable_mutex);
33 DEFINE_STATIC_KEY_FALSE(__scx_enabled);
34 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
35 static atomic_t scx_enable_state_var = ATOMIC_INIT(SCX_DISABLED);
36 static int scx_bypass_depth;
37 static cpumask_var_t scx_bypass_lb_donee_cpumask;
38 static cpumask_var_t scx_bypass_lb_resched_cpumask;
39 static bool scx_aborting;
40 static bool scx_init_task_enabled;
41 static bool scx_switching_all;
42 DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
43 
44 /*
45  * Tracks whether scx_enable() called scx_bypass(true). Used to balance bypass
46  * depth on enable failure. Will be removed when bypass depth is moved into the
47  * sched instance.
48  */
49 static bool scx_bypassed_for_enable;
50 
51 static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
52 static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
53 
54 /*
55  * A monotically increasing sequence number that is incremented every time a
56  * scheduler is enabled. This can be used by to check if any custom sched_ext
57  * scheduler has ever been used in the system.
58  */
59 static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);
60 
61 /*
62  * The maximum amount of time in jiffies that a task may be runnable without
63  * being scheduled on a CPU. If this timeout is exceeded, it will trigger
64  * scx_error().
65  */
66 static unsigned long scx_watchdog_timeout;
67 
68 /*
69  * The last time the delayed work was run. This delayed work relies on
70  * ksoftirqd being able to run to service timer interrupts, so it's possible
71  * that this work itself could get wedged. To account for this, we check that
72  * it's not stalled in the timer tick, and trigger an error if it is.
73  */
74 static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
75 
76 static struct delayed_work scx_watchdog_work;
77 
78 /*
79  * For %SCX_KICK_WAIT: Each CPU has a pointer to an array of kick_sync sequence
80  * numbers. The arrays are allocated with kvzalloc() as size can exceed percpu
81  * allocator limits on large machines. O(nr_cpu_ids^2) allocation, allocated
82  * lazily when enabling and freed when disabling to avoid waste when sched_ext
83  * isn't active.
84  */
85 struct scx_kick_syncs {
86 	struct rcu_head		rcu;
87 	unsigned long		syncs[];
88 };
89 
90 static DEFINE_PER_CPU(struct scx_kick_syncs __rcu *, scx_kick_syncs);
91 
92 /*
93  * Direct dispatch marker.
94  *
95  * Non-NULL values are used for direct dispatch from enqueue path. A valid
96  * pointer points to the task currently being enqueued. An ERR_PTR value is used
97  * to indicate that direct dispatch has already happened.
98  */
99 static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
100 
101 static const struct rhashtable_params dsq_hash_params = {
102 	.key_len		= sizeof_field(struct scx_dispatch_q, id),
103 	.key_offset		= offsetof(struct scx_dispatch_q, id),
104 	.head_offset		= offsetof(struct scx_dispatch_q, hash_node),
105 };
106 
107 static LLIST_HEAD(dsqs_to_free);
108 
109 /* dispatch buf */
110 struct scx_dsp_buf_ent {
111 	struct task_struct	*task;
112 	unsigned long		qseq;
113 	u64			dsq_id;
114 	u64			enq_flags;
115 };
116 
117 static u32 scx_dsp_max_batch;
118 
119 struct scx_dsp_ctx {
120 	struct rq		*rq;
121 	u32			cursor;
122 	u32			nr_tasks;
123 	struct scx_dsp_buf_ent	buf[];
124 };
125 
126 static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
127 
128 /* string formatting from BPF */
129 struct scx_bstr_buf {
130 	u64			data[MAX_BPRINTF_VARARGS];
131 	char			line[SCX_EXIT_MSG_LEN];
132 };
133 
134 static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
135 static struct scx_bstr_buf scx_exit_bstr_buf;
136 
137 /* ops debug dump */
138 struct scx_dump_data {
139 	s32			cpu;
140 	bool			first;
141 	s32			cursor;
142 	struct seq_buf		*s;
143 	const char		*prefix;
144 	struct scx_bstr_buf	buf;
145 };
146 
147 static struct scx_dump_data scx_dump_data = {
148 	.cpu			= -1,
149 };
150 
151 /* /sys/kernel/sched_ext interface */
152 static struct kset *scx_kset;
153 
154 /*
155  * Parameters that can be adjusted through /sys/module/sched_ext/parameters.
156  * There usually is no reason to modify these as normal scheduler operation
157  * shouldn't be affected by them. The knobs are primarily for debugging.
158  */
159 static u64 scx_slice_dfl = SCX_SLICE_DFL;
160 static unsigned int scx_slice_bypass_us = SCX_SLICE_BYPASS / NSEC_PER_USEC;
161 static unsigned int scx_bypass_lb_intv_us = SCX_BYPASS_LB_DFL_INTV_US;
162 
163 static int set_slice_us(const char *val, const struct kernel_param *kp)
164 {
165 	return param_set_uint_minmax(val, kp, 100, 100 * USEC_PER_MSEC);
166 }
167 
168 static const struct kernel_param_ops slice_us_param_ops = {
169 	.set = set_slice_us,
170 	.get = param_get_uint,
171 };
172 
173 static int set_bypass_lb_intv_us(const char *val, const struct kernel_param *kp)
174 {
175 	return param_set_uint_minmax(val, kp, 0, 10 * USEC_PER_SEC);
176 }
177 
178 static const struct kernel_param_ops bypass_lb_intv_us_param_ops = {
179 	.set = set_bypass_lb_intv_us,
180 	.get = param_get_uint,
181 };
182 
183 #undef MODULE_PARAM_PREFIX
184 #define MODULE_PARAM_PREFIX	"sched_ext."
185 
186 module_param_cb(slice_bypass_us, &slice_us_param_ops, &scx_slice_bypass_us, 0600);
187 MODULE_PARM_DESC(slice_bypass_us, "bypass slice in microseconds, applied on [un]load (100us to 100ms)");
188 module_param_cb(bypass_lb_intv_us, &bypass_lb_intv_us_param_ops, &scx_bypass_lb_intv_us, 0600);
189 MODULE_PARM_DESC(bypass_lb_intv_us, "bypass load balance interval in microseconds (0 (disable) to 10s)");
190 
191 #undef MODULE_PARAM_PREFIX
192 
193 #define CREATE_TRACE_POINTS
194 #include <trace/events/sched_ext.h>
195 
196 static void process_ddsp_deferred_locals(struct rq *rq);
197 static u32 reenq_local(struct rq *rq);
198 static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags);
199 static bool scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind,
200 		      s64 exit_code, const char *fmt, va_list args);
201 
202 static __printf(4, 5) bool scx_exit(struct scx_sched *sch,
203 				    enum scx_exit_kind kind, s64 exit_code,
204 				    const char *fmt, ...)
205 {
206 	va_list args;
207 	bool ret;
208 
209 	va_start(args, fmt);
210 	ret = scx_vexit(sch, kind, exit_code, fmt, args);
211 	va_end(args);
212 
213 	return ret;
214 }
215 
216 #define scx_error(sch, fmt, args...)	scx_exit((sch), SCX_EXIT_ERROR, 0, fmt, ##args)
217 #define scx_verror(sch, fmt, args)	scx_vexit((sch), SCX_EXIT_ERROR, 0, fmt, args)
218 
219 #define SCX_HAS_OP(sch, op)	test_bit(SCX_OP_IDX(op), (sch)->has_op)
220 
221 static long jiffies_delta_msecs(unsigned long at, unsigned long now)
222 {
223 	if (time_after(at, now))
224 		return jiffies_to_msecs(at - now);
225 	else
226 		return -(long)jiffies_to_msecs(now - at);
227 }
228 
229 /* if the highest set bit is N, return a mask with bits [N+1, 31] set */
230 static u32 higher_bits(u32 flags)
231 {
232 	return ~((1 << fls(flags)) - 1);
233 }
234 
235 /* return the mask with only the highest bit set */
236 static u32 highest_bit(u32 flags)
237 {
238 	int bit = fls(flags);
239 	return ((u64)1 << bit) >> 1;
240 }
241 
242 static bool u32_before(u32 a, u32 b)
243 {
244 	return (s32)(a - b) < 0;
245 }
246 
247 static struct scx_dispatch_q *find_global_dsq(struct scx_sched *sch,
248 					      struct task_struct *p)
249 {
250 	return sch->global_dsqs[cpu_to_node(task_cpu(p))];
251 }
252 
253 static struct scx_dispatch_q *find_user_dsq(struct scx_sched *sch, u64 dsq_id)
254 {
255 	return rhashtable_lookup(&sch->dsq_hash, &dsq_id, dsq_hash_params);
256 }
257 
258 static const struct sched_class *scx_setscheduler_class(struct task_struct *p)
259 {
260 	if (p->sched_class == &stop_sched_class)
261 		return &stop_sched_class;
262 
263 	return __setscheduler_class(p->policy, p->prio);
264 }
265 
266 /*
267  * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
268  * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
269  * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check
270  * whether it's running from an allowed context.
271  *
272  * @mask is constant, always inline to cull the mask calculations.
273  */
274 static __always_inline void scx_kf_allow(u32 mask)
275 {
276 	/* nesting is allowed only in increasing scx_kf_mask order */
277 	WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
278 		  "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
279 		  current->scx.kf_mask, mask);
280 	current->scx.kf_mask |= mask;
281 	barrier();
282 }
283 
284 static void scx_kf_disallow(u32 mask)
285 {
286 	barrier();
287 	current->scx.kf_mask &= ~mask;
288 }
289 
290 /*
291  * Track the rq currently locked.
292  *
293  * This allows kfuncs to safely operate on rq from any scx ops callback,
294  * knowing which rq is already locked.
295  */
296 DEFINE_PER_CPU(struct rq *, scx_locked_rq_state);
297 
298 static inline void update_locked_rq(struct rq *rq)
299 {
300 	/*
301 	 * Check whether @rq is actually locked. This can help expose bugs
302 	 * or incorrect assumptions about the context in which a kfunc or
303 	 * callback is executed.
304 	 */
305 	if (rq)
306 		lockdep_assert_rq_held(rq);
307 	__this_cpu_write(scx_locked_rq_state, rq);
308 }
309 
310 #define SCX_CALL_OP(sch, mask, op, rq, args...)					\
311 do {										\
312 	if (rq)									\
313 		update_locked_rq(rq);						\
314 	if (mask) {								\
315 		scx_kf_allow(mask);						\
316 		(sch)->ops.op(args);						\
317 		scx_kf_disallow(mask);						\
318 	} else {								\
319 		(sch)->ops.op(args);						\
320 	}									\
321 	if (rq)									\
322 		update_locked_rq(NULL);						\
323 } while (0)
324 
325 #define SCX_CALL_OP_RET(sch, mask, op, rq, args...)				\
326 ({										\
327 	__typeof__((sch)->ops.op(args)) __ret;					\
328 										\
329 	if (rq)									\
330 		update_locked_rq(rq);						\
331 	if (mask) {								\
332 		scx_kf_allow(mask);						\
333 		__ret = (sch)->ops.op(args);					\
334 		scx_kf_disallow(mask);						\
335 	} else {								\
336 		__ret = (sch)->ops.op(args);					\
337 	}									\
338 	if (rq)									\
339 		update_locked_rq(NULL);						\
340 	__ret;									\
341 })
342 
343 /*
344  * Some kfuncs are allowed only on the tasks that are subjects of the
345  * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such
346  * restrictions, the following SCX_CALL_OP_*() variants should be used when
347  * invoking scx_ops operations that take task arguments. These can only be used
348  * for non-nesting operations due to the way the tasks are tracked.
349  *
350  * kfuncs which can only operate on such tasks can in turn use
351  * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
352  * the specific task.
353  */
354 #define SCX_CALL_OP_TASK(sch, mask, op, rq, task, args...)			\
355 do {										\
356 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
357 	current->scx.kf_tasks[0] = task;					\
358 	SCX_CALL_OP((sch), mask, op, rq, task, ##args);				\
359 	current->scx.kf_tasks[0] = NULL;					\
360 } while (0)
361 
362 #define SCX_CALL_OP_TASK_RET(sch, mask, op, rq, task, args...)			\
363 ({										\
364 	__typeof__((sch)->ops.op(task, ##args)) __ret;				\
365 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
366 	current->scx.kf_tasks[0] = task;					\
367 	__ret = SCX_CALL_OP_RET((sch), mask, op, rq, task, ##args);		\
368 	current->scx.kf_tasks[0] = NULL;					\
369 	__ret;									\
370 })
371 
372 #define SCX_CALL_OP_2TASKS_RET(sch, mask, op, rq, task0, task1, args...)	\
373 ({										\
374 	__typeof__((sch)->ops.op(task0, task1, ##args)) __ret;			\
375 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
376 	current->scx.kf_tasks[0] = task0;					\
377 	current->scx.kf_tasks[1] = task1;					\
378 	__ret = SCX_CALL_OP_RET((sch), mask, op, rq, task0, task1, ##args);	\
379 	current->scx.kf_tasks[0] = NULL;					\
380 	current->scx.kf_tasks[1] = NULL;					\
381 	__ret;									\
382 })
383 
384 /* @mask is constant, always inline to cull unnecessary branches */
385 static __always_inline bool scx_kf_allowed(struct scx_sched *sch, u32 mask)
386 {
387 	if (unlikely(!(current->scx.kf_mask & mask))) {
388 		scx_error(sch, "kfunc with mask 0x%x called from an operation only allowing 0x%x",
389 			  mask, current->scx.kf_mask);
390 		return false;
391 	}
392 
393 	/*
394 	 * Enforce nesting boundaries. e.g. A kfunc which can be called from
395 	 * DISPATCH must not be called if we're running DEQUEUE which is nested
396 	 * inside ops.dispatch(). We don't need to check boundaries for any
397 	 * blocking kfuncs as the verifier ensures they're only called from
398 	 * sleepable progs.
399 	 */
400 	if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
401 		     (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
402 		scx_error(sch, "cpu_release kfunc called from a nested operation");
403 		return false;
404 	}
405 
406 	if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
407 		     (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
408 		scx_error(sch, "dispatch kfunc called from a nested operation");
409 		return false;
410 	}
411 
412 	return true;
413 }
414 
415 /* see SCX_CALL_OP_TASK() */
416 static __always_inline bool scx_kf_allowed_on_arg_tasks(struct scx_sched *sch,
417 							u32 mask,
418 							struct task_struct *p)
419 {
420 	if (!scx_kf_allowed(sch, mask))
421 		return false;
422 
423 	if (unlikely((p != current->scx.kf_tasks[0] &&
424 		      p != current->scx.kf_tasks[1]))) {
425 		scx_error(sch, "called on a task not being operated on");
426 		return false;
427 	}
428 
429 	return true;
430 }
431 
432 /**
433  * nldsq_next_task - Iterate to the next task in a non-local DSQ
434  * @dsq: user dsq being iterated
435  * @cur: current position, %NULL to start iteration
436  * @rev: walk backwards
437  *
438  * Returns %NULL when iteration is finished.
439  */
440 static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
441 					   struct task_struct *cur, bool rev)
442 {
443 	struct list_head *list_node;
444 	struct scx_dsq_list_node *dsq_lnode;
445 
446 	lockdep_assert_held(&dsq->lock);
447 
448 	if (cur)
449 		list_node = &cur->scx.dsq_list.node;
450 	else
451 		list_node = &dsq->list;
452 
453 	/* find the next task, need to skip BPF iteration cursors */
454 	do {
455 		if (rev)
456 			list_node = list_node->prev;
457 		else
458 			list_node = list_node->next;
459 
460 		if (list_node == &dsq->list)
461 			return NULL;
462 
463 		dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
464 					 node);
465 	} while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);
466 
467 	return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
468 }
469 
470 #define nldsq_for_each_task(p, dsq)						\
471 	for ((p) = nldsq_next_task((dsq), NULL, false); (p);			\
472 	     (p) = nldsq_next_task((dsq), (p), false))
473 
474 
475 /*
476  * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
477  * dispatch order. BPF-visible iterator is opaque and larger to allow future
478  * changes without breaking backward compatibility. Can be used with
479  * bpf_for_each(). See bpf_iter_scx_dsq_*().
480  */
481 enum scx_dsq_iter_flags {
482 	/* iterate in the reverse dispatch order */
483 	SCX_DSQ_ITER_REV		= 1U << 16,
484 
485 	__SCX_DSQ_ITER_HAS_SLICE	= 1U << 30,
486 	__SCX_DSQ_ITER_HAS_VTIME	= 1U << 31,
487 
488 	__SCX_DSQ_ITER_USER_FLAGS	= SCX_DSQ_ITER_REV,
489 	__SCX_DSQ_ITER_ALL_FLAGS	= __SCX_DSQ_ITER_USER_FLAGS |
490 					  __SCX_DSQ_ITER_HAS_SLICE |
491 					  __SCX_DSQ_ITER_HAS_VTIME,
492 };
493 
494 struct bpf_iter_scx_dsq_kern {
495 	struct scx_dsq_list_node	cursor;
496 	struct scx_dispatch_q		*dsq;
497 	u64				slice;
498 	u64				vtime;
499 } __attribute__((aligned(8)));
500 
501 struct bpf_iter_scx_dsq {
502 	u64				__opaque[6];
503 } __attribute__((aligned(8)));
504 
505 
506 /*
507  * SCX task iterator.
508  */
509 struct scx_task_iter {
510 	struct sched_ext_entity		cursor;
511 	struct task_struct		*locked_task;
512 	struct rq			*rq;
513 	struct rq_flags			rf;
514 	u32				cnt;
515 	bool				list_locked;
516 };
517 
518 /**
519  * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration
520  * @iter: iterator to init
521  *
522  * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter
523  * must eventually be stopped with scx_task_iter_stop().
524  *
525  * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock()
526  * between this and the first next() call or between any two next() calls. If
527  * the locks are released between two next() calls, the caller is responsible
528  * for ensuring that the task being iterated remains accessible either through
529  * RCU read lock or obtaining a reference count.
530  *
531  * All tasks which existed when the iteration started are guaranteed to be
532  * visited as long as they are not dead.
533  */
534 static void scx_task_iter_start(struct scx_task_iter *iter)
535 {
536 	memset(iter, 0, sizeof(*iter));
537 
538 	raw_spin_lock_irq(&scx_tasks_lock);
539 
540 	iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
541 	list_add(&iter->cursor.tasks_node, &scx_tasks);
542 	iter->list_locked = true;
543 }
544 
545 static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
546 {
547 	if (iter->locked_task) {
548 		task_rq_unlock(iter->rq, iter->locked_task, &iter->rf);
549 		iter->locked_task = NULL;
550 	}
551 }
552 
553 /**
554  * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator
555  * @iter: iterator to unlock
556  *
557  * If @iter is in the middle of a locked iteration, it may be locking the rq of
558  * the task currently being visited in addition to scx_tasks_lock. Unlock both.
559  * This function can be safely called anytime during an iteration. The next
560  * iterator operation will automatically restore the necessary locking.
561  */
562 static void scx_task_iter_unlock(struct scx_task_iter *iter)
563 {
564 	__scx_task_iter_rq_unlock(iter);
565 	if (iter->list_locked) {
566 		iter->list_locked = false;
567 		raw_spin_unlock_irq(&scx_tasks_lock);
568 	}
569 }
570 
571 static void __scx_task_iter_maybe_relock(struct scx_task_iter *iter)
572 {
573 	if (!iter->list_locked) {
574 		raw_spin_lock_irq(&scx_tasks_lock);
575 		iter->list_locked = true;
576 	}
577 }
578 
579 /**
580  * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
581  * @iter: iterator to exit
582  *
583  * Exit a previously initialized @iter. Must be called with scx_tasks_lock held
584  * which is released on return. If the iterator holds a task's rq lock, that rq
585  * lock is also released. See scx_task_iter_start() for details.
586  */
587 static void scx_task_iter_stop(struct scx_task_iter *iter)
588 {
589 	__scx_task_iter_maybe_relock(iter);
590 	list_del_init(&iter->cursor.tasks_node);
591 	scx_task_iter_unlock(iter);
592 }
593 
594 /**
595  * scx_task_iter_next - Next task
596  * @iter: iterator to walk
597  *
598  * Visit the next task. See scx_task_iter_start() for details. Locks are dropped
599  * and re-acquired every %SCX_TASK_ITER_BATCH iterations to avoid causing stalls
600  * by holding scx_tasks_lock for too long.
601  */
602 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
603 {
604 	struct list_head *cursor = &iter->cursor.tasks_node;
605 	struct sched_ext_entity *pos;
606 
607 	if (!(++iter->cnt % SCX_TASK_ITER_BATCH)) {
608 		scx_task_iter_unlock(iter);
609 		cond_resched();
610 	}
611 
612 	__scx_task_iter_maybe_relock(iter);
613 
614 	list_for_each_entry(pos, cursor, tasks_node) {
615 		if (&pos->tasks_node == &scx_tasks)
616 			return NULL;
617 		if (!(pos->flags & SCX_TASK_CURSOR)) {
618 			list_move(cursor, &pos->tasks_node);
619 			return container_of(pos, struct task_struct, scx);
620 		}
621 	}
622 
623 	/* can't happen, should always terminate at scx_tasks above */
624 	BUG();
625 }
626 
627 /**
628  * scx_task_iter_next_locked - Next non-idle task with its rq locked
629  * @iter: iterator to walk
630  *
631  * Visit the non-idle task with its rq lock held. Allows callers to specify
632  * whether they would like to filter out dead tasks. See scx_task_iter_start()
633  * for details.
634  */
635 static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
636 {
637 	struct task_struct *p;
638 
639 	__scx_task_iter_rq_unlock(iter);
640 
641 	while ((p = scx_task_iter_next(iter))) {
642 		/*
643 		 * scx_task_iter is used to prepare and move tasks into SCX
644 		 * while loading the BPF scheduler and vice-versa while
645 		 * unloading. The init_tasks ("swappers") should be excluded
646 		 * from the iteration because:
647 		 *
648 		 * - It's unsafe to use __setschduler_prio() on an init_task to
649 		 *   determine the sched_class to use as it won't preserve its
650 		 *   idle_sched_class.
651 		 *
652 		 * - ops.init/exit_task() can easily be confused if called with
653 		 *   init_tasks as they, e.g., share PID 0.
654 		 *
655 		 * As init_tasks are never scheduled through SCX, they can be
656 		 * skipped safely. Note that is_idle_task() which tests %PF_IDLE
657 		 * doesn't work here:
658 		 *
659 		 * - %PF_IDLE may not be set for an init_task whose CPU hasn't
660 		 *   yet been onlined.
661 		 *
662 		 * - %PF_IDLE can be set on tasks that are not init_tasks. See
663 		 *   play_idle_precise() used by CONFIG_IDLE_INJECT.
664 		 *
665 		 * Test for idle_sched_class as only init_tasks are on it.
666 		 */
667 		if (p->sched_class != &idle_sched_class)
668 			break;
669 	}
670 	if (!p)
671 		return NULL;
672 
673 	iter->rq = task_rq_lock(p, &iter->rf);
674 	iter->locked_task = p;
675 
676 	return p;
677 }
678 
679 /**
680  * scx_add_event - Increase an event counter for 'name' by 'cnt'
681  * @sch: scx_sched to account events for
682  * @name: an event name defined in struct scx_event_stats
683  * @cnt: the number of the event occurred
684  *
685  * This can be used when preemption is not disabled.
686  */
687 #define scx_add_event(sch, name, cnt) do {					\
688 	this_cpu_add((sch)->pcpu->event_stats.name, (cnt));			\
689 	trace_sched_ext_event(#name, (cnt));					\
690 } while(0)
691 
692 /**
693  * __scx_add_event - Increase an event counter for 'name' by 'cnt'
694  * @sch: scx_sched to account events for
695  * @name: an event name defined in struct scx_event_stats
696  * @cnt: the number of the event occurred
697  *
698  * This should be used only when preemption is disabled.
699  */
700 #define __scx_add_event(sch, name, cnt) do {					\
701 	__this_cpu_add((sch)->pcpu->event_stats.name, (cnt));			\
702 	trace_sched_ext_event(#name, cnt);					\
703 } while(0)
704 
705 /**
706  * scx_agg_event - Aggregate an event counter 'kind' from 'src_e' to 'dst_e'
707  * @dst_e: destination event stats
708  * @src_e: source event stats
709  * @kind: a kind of event to be aggregated
710  */
711 #define scx_agg_event(dst_e, src_e, kind) do {					\
712 	(dst_e)->kind += READ_ONCE((src_e)->kind);				\
713 } while(0)
714 
715 /**
716  * scx_dump_event - Dump an event 'kind' in 'events' to 's'
717  * @s: output seq_buf
718  * @events: event stats
719  * @kind: a kind of event to dump
720  */
721 #define scx_dump_event(s, events, kind) do {					\
722 	dump_line(&(s), "%40s: %16lld", #kind, (events)->kind);			\
723 } while (0)
724 
725 
726 static void scx_read_events(struct scx_sched *sch,
727 			    struct scx_event_stats *events);
728 
729 static enum scx_enable_state scx_enable_state(void)
730 {
731 	return atomic_read(&scx_enable_state_var);
732 }
733 
734 static enum scx_enable_state scx_set_enable_state(enum scx_enable_state to)
735 {
736 	return atomic_xchg(&scx_enable_state_var, to);
737 }
738 
739 static bool scx_tryset_enable_state(enum scx_enable_state to,
740 				    enum scx_enable_state from)
741 {
742 	int from_v = from;
743 
744 	return atomic_try_cmpxchg(&scx_enable_state_var, &from_v, to);
745 }
746 
747 /**
748  * wait_ops_state - Busy-wait the specified ops state to end
749  * @p: target task
750  * @opss: state to wait the end of
751  *
752  * Busy-wait for @p to transition out of @opss. This can only be used when the
753  * state part of @opss is %SCX_QUEUEING or %SCX_DISPATCHING. This function also
754  * has load_acquire semantics to ensure that the caller can see the updates made
755  * in the enqueueing and dispatching paths.
756  */
757 static void wait_ops_state(struct task_struct *p, unsigned long opss)
758 {
759 	do {
760 		cpu_relax();
761 	} while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
762 }
763 
764 static inline bool __cpu_valid(s32 cpu)
765 {
766 	return likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu));
767 }
768 
769 /**
770  * ops_cpu_valid - Verify a cpu number, to be used on ops input args
771  * @sch: scx_sched to abort on error
772  * @cpu: cpu number which came from a BPF ops
773  * @where: extra information reported on error
774  *
775  * @cpu is a cpu number which came from the BPF scheduler and can be any value.
776  * Verify that it is in range and one of the possible cpus. If invalid, trigger
777  * an ops error.
778  */
779 static bool ops_cpu_valid(struct scx_sched *sch, s32 cpu, const char *where)
780 {
781 	if (__cpu_valid(cpu)) {
782 		return true;
783 	} else {
784 		scx_error(sch, "invalid CPU %d%s%s", cpu, where ? " " : "", where ?: "");
785 		return false;
786 	}
787 }
788 
789 /**
790  * ops_sanitize_err - Sanitize a -errno value
791  * @sch: scx_sched to error out on error
792  * @ops_name: operation to blame on failure
793  * @err: -errno value to sanitize
794  *
795  * Verify @err is a valid -errno. If not, trigger scx_error() and return
796  * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
797  * cause misbehaviors. For an example, a large negative return from
798  * ops.init_task() triggers an oops when passed up the call chain because the
799  * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
800  * handled as a pointer.
801  */
802 static int ops_sanitize_err(struct scx_sched *sch, const char *ops_name, s32 err)
803 {
804 	if (err < 0 && err >= -MAX_ERRNO)
805 		return err;
806 
807 	scx_error(sch, "ops.%s() returned an invalid errno %d", ops_name, err);
808 	return -EPROTO;
809 }
810 
811 static void run_deferred(struct rq *rq)
812 {
813 	process_ddsp_deferred_locals(rq);
814 
815 	if (local_read(&rq->scx.reenq_local_deferred)) {
816 		local_set(&rq->scx.reenq_local_deferred, 0);
817 		reenq_local(rq);
818 	}
819 }
820 
821 static void deferred_bal_cb_workfn(struct rq *rq)
822 {
823 	run_deferred(rq);
824 }
825 
826 static void deferred_irq_workfn(struct irq_work *irq_work)
827 {
828 	struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
829 
830 	raw_spin_rq_lock(rq);
831 	run_deferred(rq);
832 	raw_spin_rq_unlock(rq);
833 }
834 
835 /**
836  * schedule_deferred - Schedule execution of deferred actions on an rq
837  * @rq: target rq
838  *
839  * Schedule execution of deferred actions on @rq. Deferred actions are executed
840  * with @rq locked but unpinned, and thus can unlock @rq to e.g. migrate tasks
841  * to other rqs.
842  */
843 static void schedule_deferred(struct rq *rq)
844 {
845 	/*
846 	 * Queue an irq work. They are executed on IRQ re-enable which may take
847 	 * a bit longer than the scheduler hook in schedule_deferred_locked().
848 	 */
849 	irq_work_queue(&rq->scx.deferred_irq_work);
850 }
851 
852 /**
853  * schedule_deferred_locked - Schedule execution of deferred actions on an rq
854  * @rq: target rq
855  *
856  * Schedule execution of deferred actions on @rq. Equivalent to
857  * schedule_deferred() but requires @rq to be locked and can be more efficient.
858  */
859 static void schedule_deferred_locked(struct rq *rq)
860 {
861 	lockdep_assert_rq_held(rq);
862 
863 	/*
864 	 * If in the middle of waking up a task, task_woken_scx() will be called
865 	 * afterwards which will then run the deferred actions, no need to
866 	 * schedule anything.
867 	 */
868 	if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
869 		return;
870 
871 	/* Don't do anything if there already is a deferred operation. */
872 	if (rq->scx.flags & SCX_RQ_BAL_CB_PENDING)
873 		return;
874 
875 	/*
876 	 * If in balance, the balance callbacks will be called before rq lock is
877 	 * released. Schedule one.
878 	 *
879 	 *
880 	 * We can't directly insert the callback into the
881 	 * rq's list: The call can drop its lock and make the pending balance
882 	 * callback visible to unrelated code paths that call rq_pin_lock().
883 	 *
884 	 * Just let balance_one() know that it must do it itself.
885 	 */
886 	if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
887 		rq->scx.flags |= SCX_RQ_BAL_CB_PENDING;
888 		return;
889 	}
890 
891 	/*
892 	 * No scheduler hooks available. Use the generic irq_work path. The
893 	 * above WAKEUP and BALANCE paths should cover most of the cases and the
894 	 * time to IRQ re-enable shouldn't be long.
895 	 */
896 	schedule_deferred(rq);
897 }
898 
899 /**
900  * touch_core_sched - Update timestamp used for core-sched task ordering
901  * @rq: rq to read clock from, must be locked
902  * @p: task to update the timestamp for
903  *
904  * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
905  * implement global or local-DSQ FIFO ordering for core-sched. Should be called
906  * when a task becomes runnable and its turn on the CPU ends (e.g. slice
907  * exhaustion).
908  */
909 static void touch_core_sched(struct rq *rq, struct task_struct *p)
910 {
911 	lockdep_assert_rq_held(rq);
912 
913 #ifdef CONFIG_SCHED_CORE
914 	/*
915 	 * It's okay to update the timestamp spuriously. Use
916 	 * sched_core_disabled() which is cheaper than enabled().
917 	 *
918 	 * As this is used to determine ordering between tasks of sibling CPUs,
919 	 * it may be better to use per-core dispatch sequence instead.
920 	 */
921 	if (!sched_core_disabled())
922 		p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
923 #endif
924 }
925 
926 /**
927  * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
928  * @rq: rq to read clock from, must be locked
929  * @p: task being dispatched
930  *
931  * If the BPF scheduler implements custom core-sched ordering via
932  * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
933  * ordering within each local DSQ. This function is called from dispatch paths
934  * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
935  */
936 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
937 {
938 	lockdep_assert_rq_held(rq);
939 
940 #ifdef CONFIG_SCHED_CORE
941 	if (unlikely(SCX_HAS_OP(scx_root, core_sched_before)))
942 		touch_core_sched(rq, p);
943 #endif
944 }
945 
946 static void update_curr_scx(struct rq *rq)
947 {
948 	struct task_struct *curr = rq->curr;
949 	s64 delta_exec;
950 
951 	delta_exec = update_curr_common(rq);
952 	if (unlikely(delta_exec <= 0))
953 		return;
954 
955 	if (curr->scx.slice != SCX_SLICE_INF) {
956 		curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
957 		if (!curr->scx.slice)
958 			touch_core_sched(rq, curr);
959 	}
960 }
961 
962 static bool scx_dsq_priq_less(struct rb_node *node_a,
963 			      const struct rb_node *node_b)
964 {
965 	const struct task_struct *a =
966 		container_of(node_a, struct task_struct, scx.dsq_priq);
967 	const struct task_struct *b =
968 		container_of(node_b, struct task_struct, scx.dsq_priq);
969 
970 	return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
971 }
972 
973 static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
974 {
975 	/* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
976 	WRITE_ONCE(dsq->nr, dsq->nr + delta);
977 }
978 
979 static void refill_task_slice_dfl(struct scx_sched *sch, struct task_struct *p)
980 {
981 	p->scx.slice = READ_ONCE(scx_slice_dfl);
982 	__scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1);
983 }
984 
985 static void local_dsq_post_enq(struct scx_dispatch_q *dsq, struct task_struct *p,
986 			       u64 enq_flags)
987 {
988 	struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
989 	bool preempt = false;
990 
991 	/*
992 	 * If @rq is in balance, the CPU is already vacant and looking for the
993 	 * next task to run. No need to preempt or trigger resched after moving
994 	 * @p into its local DSQ.
995 	 */
996 	if (rq->scx.flags & SCX_RQ_IN_BALANCE)
997 		return;
998 
999 	if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
1000 	    rq->curr->sched_class == &ext_sched_class) {
1001 		rq->curr->scx.slice = 0;
1002 		preempt = true;
1003 	}
1004 
1005 	if (preempt || sched_class_above(&ext_sched_class, rq->curr->sched_class))
1006 		resched_curr(rq);
1007 }
1008 
1009 static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
1010 			     struct task_struct *p, u64 enq_flags)
1011 {
1012 	bool is_local = dsq->id == SCX_DSQ_LOCAL;
1013 
1014 	WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1015 	WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
1016 		     !RB_EMPTY_NODE(&p->scx.dsq_priq));
1017 
1018 	if (!is_local) {
1019 		raw_spin_lock_nested(&dsq->lock,
1020 			(enq_flags & SCX_ENQ_NESTED) ? SINGLE_DEPTH_NESTING : 0);
1021 
1022 		if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
1023 			scx_error(sch, "attempting to dispatch to a destroyed dsq");
1024 			/* fall back to the global dsq */
1025 			raw_spin_unlock(&dsq->lock);
1026 			dsq = find_global_dsq(sch, p);
1027 			raw_spin_lock(&dsq->lock);
1028 		}
1029 	}
1030 
1031 	if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
1032 		     (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
1033 		/*
1034 		 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
1035 		 * their FIFO queues. To avoid confusion and accidentally
1036 		 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we
1037 		 * disallow any internal DSQ from doing vtime ordering of
1038 		 * tasks.
1039 		 */
1040 		scx_error(sch, "cannot use vtime ordering for built-in DSQs");
1041 		enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
1042 	}
1043 
1044 	if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
1045 		struct rb_node *rbp;
1046 
1047 		/*
1048 		 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
1049 		 * linked to both the rbtree and list on PRIQs, this can only be
1050 		 * tested easily when adding the first task.
1051 		 */
1052 		if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
1053 			     nldsq_next_task(dsq, NULL, false)))
1054 			scx_error(sch, "DSQ ID 0x%016llx already had FIFO-enqueued tasks",
1055 				  dsq->id);
1056 
1057 		p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
1058 		rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
1059 
1060 		/*
1061 		 * Find the previous task and insert after it on the list so
1062 		 * that @dsq->list is vtime ordered.
1063 		 */
1064 		rbp = rb_prev(&p->scx.dsq_priq);
1065 		if (rbp) {
1066 			struct task_struct *prev =
1067 				container_of(rbp, struct task_struct,
1068 					     scx.dsq_priq);
1069 			list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
1070 			/* first task unchanged - no update needed */
1071 		} else {
1072 			list_add(&p->scx.dsq_list.node, &dsq->list);
1073 			/* not builtin and new task is at head - use fastpath */
1074 			rcu_assign_pointer(dsq->first_task, p);
1075 		}
1076 	} else {
1077 		/* a FIFO DSQ shouldn't be using PRIQ enqueuing */
1078 		if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
1079 			scx_error(sch, "DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
1080 				  dsq->id);
1081 
1082 		if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) {
1083 			list_add(&p->scx.dsq_list.node, &dsq->list);
1084 			/* new task inserted at head - use fastpath */
1085 			if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN))
1086 				rcu_assign_pointer(dsq->first_task, p);
1087 		} else {
1088 			bool was_empty;
1089 
1090 			was_empty = list_empty(&dsq->list);
1091 			list_add_tail(&p->scx.dsq_list.node, &dsq->list);
1092 			if (was_empty && !(dsq->id & SCX_DSQ_FLAG_BUILTIN))
1093 				rcu_assign_pointer(dsq->first_task, p);
1094 		}
1095 	}
1096 
1097 	/* seq records the order tasks are queued, used by BPF DSQ iterator */
1098 	dsq->seq++;
1099 	p->scx.dsq_seq = dsq->seq;
1100 
1101 	dsq_mod_nr(dsq, 1);
1102 	p->scx.dsq = dsq;
1103 
1104 	/*
1105 	 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
1106 	 * direct dispatch path, but we clear them here because the direct
1107 	 * dispatch verdict may be overridden on the enqueue path during e.g.
1108 	 * bypass.
1109 	 */
1110 	p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
1111 	p->scx.ddsp_enq_flags = 0;
1112 
1113 	/*
1114 	 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
1115 	 * match waiters' load_acquire.
1116 	 */
1117 	if (enq_flags & SCX_ENQ_CLEAR_OPSS)
1118 		atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1119 
1120 	if (is_local)
1121 		local_dsq_post_enq(dsq, p, enq_flags);
1122 	else
1123 		raw_spin_unlock(&dsq->lock);
1124 }
1125 
1126 static void task_unlink_from_dsq(struct task_struct *p,
1127 				 struct scx_dispatch_q *dsq)
1128 {
1129 	WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
1130 
1131 	if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
1132 		rb_erase(&p->scx.dsq_priq, &dsq->priq);
1133 		RB_CLEAR_NODE(&p->scx.dsq_priq);
1134 		p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
1135 	}
1136 
1137 	list_del_init(&p->scx.dsq_list.node);
1138 	dsq_mod_nr(dsq, -1);
1139 
1140 	if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN) && dsq->first_task == p) {
1141 		struct task_struct *first_task;
1142 
1143 		first_task = nldsq_next_task(dsq, NULL, false);
1144 		rcu_assign_pointer(dsq->first_task, first_task);
1145 	}
1146 }
1147 
1148 static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
1149 {
1150 	struct scx_dispatch_q *dsq = p->scx.dsq;
1151 	bool is_local = dsq == &rq->scx.local_dsq;
1152 
1153 	lockdep_assert_rq_held(rq);
1154 
1155 	if (!dsq) {
1156 		/*
1157 		 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
1158 		 * Unlinking is all that's needed to cancel.
1159 		 */
1160 		if (unlikely(!list_empty(&p->scx.dsq_list.node)))
1161 			list_del_init(&p->scx.dsq_list.node);
1162 
1163 		/*
1164 		 * When dispatching directly from the BPF scheduler to a local
1165 		 * DSQ, the task isn't associated with any DSQ but
1166 		 * @p->scx.holding_cpu may be set under the protection of
1167 		 * %SCX_OPSS_DISPATCHING.
1168 		 */
1169 		if (p->scx.holding_cpu >= 0)
1170 			p->scx.holding_cpu = -1;
1171 
1172 		return;
1173 	}
1174 
1175 	if (!is_local)
1176 		raw_spin_lock(&dsq->lock);
1177 
1178 	/*
1179 	 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
1180 	 * change underneath us.
1181 	*/
1182 	if (p->scx.holding_cpu < 0) {
1183 		/* @p must still be on @dsq, dequeue */
1184 		task_unlink_from_dsq(p, dsq);
1185 	} else {
1186 		/*
1187 		 * We're racing against dispatch_to_local_dsq() which already
1188 		 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
1189 		 * holding_cpu which tells dispatch_to_local_dsq() that it lost
1190 		 * the race.
1191 		 */
1192 		WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
1193 		p->scx.holding_cpu = -1;
1194 	}
1195 	p->scx.dsq = NULL;
1196 
1197 	if (!is_local)
1198 		raw_spin_unlock(&dsq->lock);
1199 }
1200 
1201 /*
1202  * Abbreviated version of dispatch_dequeue() that can be used when both @p's rq
1203  * and dsq are locked.
1204  */
1205 static void dispatch_dequeue_locked(struct task_struct *p,
1206 				    struct scx_dispatch_q *dsq)
1207 {
1208 	lockdep_assert_rq_held(task_rq(p));
1209 	lockdep_assert_held(&dsq->lock);
1210 
1211 	task_unlink_from_dsq(p, dsq);
1212 	p->scx.dsq = NULL;
1213 }
1214 
1215 static struct scx_dispatch_q *find_dsq_for_dispatch(struct scx_sched *sch,
1216 						    struct rq *rq, u64 dsq_id,
1217 						    struct task_struct *p)
1218 {
1219 	struct scx_dispatch_q *dsq;
1220 
1221 	if (dsq_id == SCX_DSQ_LOCAL)
1222 		return &rq->scx.local_dsq;
1223 
1224 	if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
1225 		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
1226 
1227 		if (!ops_cpu_valid(sch, cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
1228 			return find_global_dsq(sch, p);
1229 
1230 		return &cpu_rq(cpu)->scx.local_dsq;
1231 	}
1232 
1233 	if (dsq_id == SCX_DSQ_GLOBAL)
1234 		dsq = find_global_dsq(sch, p);
1235 	else
1236 		dsq = find_user_dsq(sch, dsq_id);
1237 
1238 	if (unlikely(!dsq)) {
1239 		scx_error(sch, "non-existent DSQ 0x%llx for %s[%d]",
1240 			  dsq_id, p->comm, p->pid);
1241 		return find_global_dsq(sch, p);
1242 	}
1243 
1244 	return dsq;
1245 }
1246 
1247 static void mark_direct_dispatch(struct scx_sched *sch,
1248 				 struct task_struct *ddsp_task,
1249 				 struct task_struct *p, u64 dsq_id,
1250 				 u64 enq_flags)
1251 {
1252 	/*
1253 	 * Mark that dispatch already happened from ops.select_cpu() or
1254 	 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
1255 	 * which can never match a valid task pointer.
1256 	 */
1257 	__this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));
1258 
1259 	/* @p must match the task on the enqueue path */
1260 	if (unlikely(p != ddsp_task)) {
1261 		if (IS_ERR(ddsp_task))
1262 			scx_error(sch, "%s[%d] already direct-dispatched",
1263 				  p->comm, p->pid);
1264 		else
1265 			scx_error(sch, "scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
1266 				  ddsp_task->comm, ddsp_task->pid,
1267 				  p->comm, p->pid);
1268 		return;
1269 	}
1270 
1271 	WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
1272 	WARN_ON_ONCE(p->scx.ddsp_enq_flags);
1273 
1274 	p->scx.ddsp_dsq_id = dsq_id;
1275 	p->scx.ddsp_enq_flags = enq_flags;
1276 }
1277 
1278 static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
1279 			    u64 enq_flags)
1280 {
1281 	struct rq *rq = task_rq(p);
1282 	struct scx_dispatch_q *dsq =
1283 		find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
1284 
1285 	touch_core_sched_dispatch(rq, p);
1286 
1287 	p->scx.ddsp_enq_flags |= enq_flags;
1288 
1289 	/*
1290 	 * We are in the enqueue path with @rq locked and pinned, and thus can't
1291 	 * double lock a remote rq and enqueue to its local DSQ. For
1292 	 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
1293 	 * the enqueue so that it's executed when @rq can be unlocked.
1294 	 */
1295 	if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
1296 		unsigned long opss;
1297 
1298 		opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
1299 
1300 		switch (opss & SCX_OPSS_STATE_MASK) {
1301 		case SCX_OPSS_NONE:
1302 			break;
1303 		case SCX_OPSS_QUEUEING:
1304 			/*
1305 			 * As @p was never passed to the BPF side, _release is
1306 			 * not strictly necessary. Still do it for consistency.
1307 			 */
1308 			atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1309 			break;
1310 		default:
1311 			WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()",
1312 				  p->comm, p->pid, opss);
1313 			atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1314 			break;
1315 		}
1316 
1317 		WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1318 		list_add_tail(&p->scx.dsq_list.node,
1319 			      &rq->scx.ddsp_deferred_locals);
1320 		schedule_deferred_locked(rq);
1321 		return;
1322 	}
1323 
1324 	dispatch_enqueue(sch, dsq, p,
1325 			 p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
1326 }
1327 
1328 static bool scx_rq_online(struct rq *rq)
1329 {
1330 	/*
1331 	 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates
1332 	 * the online state as seen from the BPF scheduler. cpu_active() test
1333 	 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will
1334 	 * stay set until the current scheduling operation is complete even if
1335 	 * we aren't locking @rq.
1336 	 */
1337 	return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
1338 }
1339 
1340 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
1341 			    int sticky_cpu)
1342 {
1343 	struct scx_sched *sch = scx_root;
1344 	struct task_struct **ddsp_taskp;
1345 	struct scx_dispatch_q *dsq;
1346 	unsigned long qseq;
1347 
1348 	WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
1349 
1350 	/* rq migration */
1351 	if (sticky_cpu == cpu_of(rq))
1352 		goto local_norefill;
1353 
1354 	/*
1355 	 * If !scx_rq_online(), we already told the BPF scheduler that the CPU
1356 	 * is offline and are just running the hotplug path. Don't bother the
1357 	 * BPF scheduler.
1358 	 */
1359 	if (!scx_rq_online(rq))
1360 		goto local;
1361 
1362 	if (scx_rq_bypassing(rq)) {
1363 		__scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);
1364 		goto bypass;
1365 	}
1366 
1367 	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
1368 		goto direct;
1369 
1370 	/* see %SCX_OPS_ENQ_EXITING */
1371 	if (!(sch->ops.flags & SCX_OPS_ENQ_EXITING) &&
1372 	    unlikely(p->flags & PF_EXITING)) {
1373 		__scx_add_event(sch, SCX_EV_ENQ_SKIP_EXITING, 1);
1374 		goto local;
1375 	}
1376 
1377 	/* see %SCX_OPS_ENQ_MIGRATION_DISABLED */
1378 	if (!(sch->ops.flags & SCX_OPS_ENQ_MIGRATION_DISABLED) &&
1379 	    is_migration_disabled(p)) {
1380 		__scx_add_event(sch, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED, 1);
1381 		goto local;
1382 	}
1383 
1384 	if (unlikely(!SCX_HAS_OP(sch, enqueue)))
1385 		goto global;
1386 
1387 	/* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
1388 	qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
1389 
1390 	WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
1391 	atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);
1392 
1393 	ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
1394 	WARN_ON_ONCE(*ddsp_taskp);
1395 	*ddsp_taskp = p;
1396 
1397 	SCX_CALL_OP_TASK(sch, SCX_KF_ENQUEUE, enqueue, rq, p, enq_flags);
1398 
1399 	*ddsp_taskp = NULL;
1400 	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
1401 		goto direct;
1402 
1403 	/*
1404 	 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
1405 	 * dequeue may be waiting. The store_release matches their load_acquire.
1406 	 */
1407 	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
1408 	return;
1409 
1410 direct:
1411 	direct_dispatch(sch, p, enq_flags);
1412 	return;
1413 local_norefill:
1414 	dispatch_enqueue(sch, &rq->scx.local_dsq, p, enq_flags);
1415 	return;
1416 local:
1417 	dsq = &rq->scx.local_dsq;
1418 	goto enqueue;
1419 global:
1420 	dsq = find_global_dsq(sch, p);
1421 	goto enqueue;
1422 bypass:
1423 	dsq = &task_rq(p)->scx.bypass_dsq;
1424 	goto enqueue;
1425 
1426 enqueue:
1427 	/*
1428 	 * For task-ordering, slice refill must be treated as implying the end
1429 	 * of the current slice. Otherwise, the longer @p stays on the CPU, the
1430 	 * higher priority it becomes from scx_prio_less()'s POV.
1431 	 */
1432 	touch_core_sched(rq, p);
1433 	refill_task_slice_dfl(sch, p);
1434 	dispatch_enqueue(sch, dsq, p, enq_flags);
1435 }
1436 
1437 static bool task_runnable(const struct task_struct *p)
1438 {
1439 	return !list_empty(&p->scx.runnable_node);
1440 }
1441 
1442 static void set_task_runnable(struct rq *rq, struct task_struct *p)
1443 {
1444 	lockdep_assert_rq_held(rq);
1445 
1446 	if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
1447 		p->scx.runnable_at = jiffies;
1448 		p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
1449 	}
1450 
1451 	/*
1452 	 * list_add_tail() must be used. scx_bypass() depends on tasks being
1453 	 * appended to the runnable_list.
1454 	 */
1455 	list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
1456 }
1457 
1458 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
1459 {
1460 	list_del_init(&p->scx.runnable_node);
1461 	if (reset_runnable_at)
1462 		p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
1463 }
1464 
1465 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
1466 {
1467 	struct scx_sched *sch = scx_root;
1468 	int sticky_cpu = p->scx.sticky_cpu;
1469 
1470 	if (enq_flags & ENQUEUE_WAKEUP)
1471 		rq->scx.flags |= SCX_RQ_IN_WAKEUP;
1472 
1473 	enq_flags |= rq->scx.extra_enq_flags;
1474 
1475 	if (sticky_cpu >= 0)
1476 		p->scx.sticky_cpu = -1;
1477 
1478 	/*
1479 	 * Restoring a running task will be immediately followed by
1480 	 * set_next_task_scx() which expects the task to not be on the BPF
1481 	 * scheduler as tasks can only start running through local DSQs. Force
1482 	 * direct-dispatch into the local DSQ by setting the sticky_cpu.
1483 	 */
1484 	if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
1485 		sticky_cpu = cpu_of(rq);
1486 
1487 	if (p->scx.flags & SCX_TASK_QUEUED) {
1488 		WARN_ON_ONCE(!task_runnable(p));
1489 		goto out;
1490 	}
1491 
1492 	set_task_runnable(rq, p);
1493 	p->scx.flags |= SCX_TASK_QUEUED;
1494 	rq->scx.nr_running++;
1495 	add_nr_running(rq, 1);
1496 
1497 	if (SCX_HAS_OP(sch, runnable) && !task_on_rq_migrating(p))
1498 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, runnable, rq, p, enq_flags);
1499 
1500 	if (enq_flags & SCX_ENQ_WAKEUP)
1501 		touch_core_sched(rq, p);
1502 
1503 	do_enqueue_task(rq, p, enq_flags, sticky_cpu);
1504 out:
1505 	rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
1506 
1507 	if ((enq_flags & SCX_ENQ_CPU_SELECTED) &&
1508 	    unlikely(cpu_of(rq) != p->scx.selected_cpu))
1509 		__scx_add_event(sch, SCX_EV_SELECT_CPU_FALLBACK, 1);
1510 }
1511 
1512 static void ops_dequeue(struct rq *rq, struct task_struct *p, u64 deq_flags)
1513 {
1514 	struct scx_sched *sch = scx_root;
1515 	unsigned long opss;
1516 
1517 	/* dequeue is always temporary, don't reset runnable_at */
1518 	clr_task_runnable(p, false);
1519 
1520 	/* acquire ensures that we see the preceding updates on QUEUED */
1521 	opss = atomic_long_read_acquire(&p->scx.ops_state);
1522 
1523 	switch (opss & SCX_OPSS_STATE_MASK) {
1524 	case SCX_OPSS_NONE:
1525 		break;
1526 	case SCX_OPSS_QUEUEING:
1527 		/*
1528 		 * QUEUEING is started and finished while holding @p's rq lock.
1529 		 * As we're holding the rq lock now, we shouldn't see QUEUEING.
1530 		 */
1531 		BUG();
1532 	case SCX_OPSS_QUEUED:
1533 		if (SCX_HAS_OP(sch, dequeue))
1534 			SCX_CALL_OP_TASK(sch, SCX_KF_REST, dequeue, rq,
1535 					 p, deq_flags);
1536 
1537 		if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
1538 					    SCX_OPSS_NONE))
1539 			break;
1540 		fallthrough;
1541 	case SCX_OPSS_DISPATCHING:
1542 		/*
1543 		 * If @p is being dispatched from the BPF scheduler to a DSQ,
1544 		 * wait for the transfer to complete so that @p doesn't get
1545 		 * added to its DSQ after dequeueing is complete.
1546 		 *
1547 		 * As we're waiting on DISPATCHING with the rq locked, the
1548 		 * dispatching side shouldn't try to lock the rq while
1549 		 * DISPATCHING is set. See dispatch_to_local_dsq().
1550 		 *
1551 		 * DISPATCHING shouldn't have qseq set and control can reach
1552 		 * here with NONE @opss from the above QUEUED case block.
1553 		 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
1554 		 */
1555 		wait_ops_state(p, SCX_OPSS_DISPATCHING);
1556 		BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
1557 		break;
1558 	}
1559 }
1560 
1561 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags)
1562 {
1563 	struct scx_sched *sch = scx_root;
1564 
1565 	if (!(p->scx.flags & SCX_TASK_QUEUED)) {
1566 		WARN_ON_ONCE(task_runnable(p));
1567 		return true;
1568 	}
1569 
1570 	ops_dequeue(rq, p, deq_flags);
1571 
1572 	/*
1573 	 * A currently running task which is going off @rq first gets dequeued
1574 	 * and then stops running. As we want running <-> stopping transitions
1575 	 * to be contained within runnable <-> quiescent transitions, trigger
1576 	 * ->stopping() early here instead of in put_prev_task_scx().
1577 	 *
1578 	 * @p may go through multiple stopping <-> running transitions between
1579 	 * here and put_prev_task_scx() if task attribute changes occur while
1580 	 * balance_scx() leaves @rq unlocked. However, they don't contain any
1581 	 * information meaningful to the BPF scheduler and can be suppressed by
1582 	 * skipping the callbacks if the task is !QUEUED.
1583 	 */
1584 	if (SCX_HAS_OP(sch, stopping) && task_current(rq, p)) {
1585 		update_curr_scx(rq);
1586 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, false);
1587 	}
1588 
1589 	if (SCX_HAS_OP(sch, quiescent) && !task_on_rq_migrating(p))
1590 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, quiescent, rq, p, deq_flags);
1591 
1592 	if (deq_flags & SCX_DEQ_SLEEP)
1593 		p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
1594 	else
1595 		p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
1596 
1597 	p->scx.flags &= ~SCX_TASK_QUEUED;
1598 	rq->scx.nr_running--;
1599 	sub_nr_running(rq, 1);
1600 
1601 	dispatch_dequeue(rq, p);
1602 	return true;
1603 }
1604 
1605 static void yield_task_scx(struct rq *rq)
1606 {
1607 	struct scx_sched *sch = scx_root;
1608 	struct task_struct *p = rq->donor;
1609 
1610 	if (SCX_HAS_OP(sch, yield))
1611 		SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq, p, NULL);
1612 	else
1613 		p->scx.slice = 0;
1614 }
1615 
1616 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
1617 {
1618 	struct scx_sched *sch = scx_root;
1619 	struct task_struct *from = rq->donor;
1620 
1621 	if (SCX_HAS_OP(sch, yield))
1622 		return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq,
1623 					      from, to);
1624 	else
1625 		return false;
1626 }
1627 
1628 static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
1629 					 struct scx_dispatch_q *src_dsq,
1630 					 struct rq *dst_rq)
1631 {
1632 	struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
1633 
1634 	/* @dsq is locked and @p is on @dst_rq */
1635 	lockdep_assert_held(&src_dsq->lock);
1636 	lockdep_assert_rq_held(dst_rq);
1637 
1638 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
1639 
1640 	if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
1641 		list_add(&p->scx.dsq_list.node, &dst_dsq->list);
1642 	else
1643 		list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
1644 
1645 	dsq_mod_nr(dst_dsq, 1);
1646 	p->scx.dsq = dst_dsq;
1647 
1648 	local_dsq_post_enq(dst_dsq, p, enq_flags);
1649 }
1650 
1651 /**
1652  * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
1653  * @p: task to move
1654  * @enq_flags: %SCX_ENQ_*
1655  * @src_rq: rq to move the task from, locked on entry, released on return
1656  * @dst_rq: rq to move the task into, locked on return
1657  *
1658  * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
1659  */
1660 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
1661 					  struct rq *src_rq, struct rq *dst_rq)
1662 {
1663 	lockdep_assert_rq_held(src_rq);
1664 
1665 	/* the following marks @p MIGRATING which excludes dequeue */
1666 	deactivate_task(src_rq, p, 0);
1667 	set_task_cpu(p, cpu_of(dst_rq));
1668 	p->scx.sticky_cpu = cpu_of(dst_rq);
1669 
1670 	raw_spin_rq_unlock(src_rq);
1671 	raw_spin_rq_lock(dst_rq);
1672 
1673 	/*
1674 	 * We want to pass scx-specific enq_flags but activate_task() will
1675 	 * truncate the upper 32 bit. As we own @rq, we can pass them through
1676 	 * @rq->scx.extra_enq_flags instead.
1677 	 */
1678 	WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
1679 	WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
1680 	dst_rq->scx.extra_enq_flags = enq_flags;
1681 	activate_task(dst_rq, p, 0);
1682 	dst_rq->scx.extra_enq_flags = 0;
1683 }
1684 
1685 /*
1686  * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
1687  * differences:
1688  *
1689  * - is_cpu_allowed() asks "Can this task run on this CPU?" while
1690  *   task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
1691  *   this CPU?".
1692  *
1693  *   While migration is disabled, is_cpu_allowed() has to say "yes" as the task
1694  *   must be allowed to finish on the CPU that it's currently on regardless of
1695  *   the CPU state. However, task_can_run_on_remote_rq() must say "no" as the
1696  *   BPF scheduler shouldn't attempt to migrate a task which has migration
1697  *   disabled.
1698  *
1699  * - The BPF scheduler is bypassed while the rq is offline and we can always say
1700  *   no to the BPF scheduler initiated migrations while offline.
1701  *
1702  * The caller must ensure that @p and @rq are on different CPUs.
1703  */
1704 static bool task_can_run_on_remote_rq(struct scx_sched *sch,
1705 				      struct task_struct *p, struct rq *rq,
1706 				      bool enforce)
1707 {
1708 	int cpu = cpu_of(rq);
1709 
1710 	WARN_ON_ONCE(task_cpu(p) == cpu);
1711 
1712 	/*
1713 	 * If @p has migration disabled, @p->cpus_ptr is updated to contain only
1714 	 * the pinned CPU in migrate_disable_switch() while @p is being switched
1715 	 * out. However, put_prev_task_scx() is called before @p->cpus_ptr is
1716 	 * updated and thus another CPU may see @p on a DSQ inbetween leading to
1717 	 * @p passing the below task_allowed_on_cpu() check while migration is
1718 	 * disabled.
1719 	 *
1720 	 * Test the migration disabled state first as the race window is narrow
1721 	 * and the BPF scheduler failing to check migration disabled state can
1722 	 * easily be masked if task_allowed_on_cpu() is done first.
1723 	 */
1724 	if (unlikely(is_migration_disabled(p))) {
1725 		if (enforce)
1726 			scx_error(sch, "SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d",
1727 				  p->comm, p->pid, task_cpu(p), cpu);
1728 		return false;
1729 	}
1730 
1731 	/*
1732 	 * We don't require the BPF scheduler to avoid dispatching to offline
1733 	 * CPUs mostly for convenience but also because CPUs can go offline
1734 	 * between scx_bpf_dsq_insert() calls and here. Trigger error iff the
1735 	 * picked CPU is outside the allowed mask.
1736 	 */
1737 	if (!task_allowed_on_cpu(p, cpu)) {
1738 		if (enforce)
1739 			scx_error(sch, "SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]",
1740 				  cpu, p->comm, p->pid);
1741 		return false;
1742 	}
1743 
1744 	if (!scx_rq_online(rq)) {
1745 		if (enforce)
1746 			__scx_add_event(sch, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE, 1);
1747 		return false;
1748 	}
1749 
1750 	return true;
1751 }
1752 
1753 /**
1754  * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
1755  * @p: target task
1756  * @dsq: locked DSQ @p is currently on
1757  * @src_rq: rq @p is currently on, stable with @dsq locked
1758  *
1759  * Called with @dsq locked but no rq's locked. We want to move @p to a different
1760  * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is
1761  * required when transferring into a local DSQ. Even when transferring into a
1762  * non-local DSQ, it's better to use the same mechanism to protect against
1763  * dequeues and maintain the invariant that @p->scx.dsq can only change while
1764  * @src_rq is locked, which e.g. scx_dump_task() depends on.
1765  *
1766  * We want to grab @src_rq but that can deadlock if we try while locking @dsq,
1767  * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As
1768  * this may race with dequeue, which can't drop the rq lock or fail, do a little
1769  * dancing from our side.
1770  *
1771  * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
1772  * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu
1773  * would be cleared to -1. While other cpus may have updated it to different
1774  * values afterwards, as this operation can't be preempted or recurse, the
1775  * holding_cpu can never become this CPU again before we're done. Thus, we can
1776  * tell whether we lost to dequeue by testing whether the holding_cpu still
1777  * points to this CPU. See dispatch_dequeue() for the counterpart.
1778  *
1779  * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is
1780  * still valid. %false if lost to dequeue.
1781  */
1782 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p,
1783 				       struct scx_dispatch_q *dsq,
1784 				       struct rq *src_rq)
1785 {
1786 	s32 cpu = raw_smp_processor_id();
1787 
1788 	lockdep_assert_held(&dsq->lock);
1789 
1790 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
1791 	task_unlink_from_dsq(p, dsq);
1792 	p->scx.holding_cpu = cpu;
1793 
1794 	raw_spin_unlock(&dsq->lock);
1795 	raw_spin_rq_lock(src_rq);
1796 
1797 	/* task_rq couldn't have changed if we're still the holding cpu */
1798 	return likely(p->scx.holding_cpu == cpu) &&
1799 		!WARN_ON_ONCE(src_rq != task_rq(p));
1800 }
1801 
1802 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
1803 				struct scx_dispatch_q *dsq, struct rq *src_rq)
1804 {
1805 	raw_spin_rq_unlock(this_rq);
1806 
1807 	if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
1808 		move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
1809 		return true;
1810 	} else {
1811 		raw_spin_rq_unlock(src_rq);
1812 		raw_spin_rq_lock(this_rq);
1813 		return false;
1814 	}
1815 }
1816 
1817 /**
1818  * move_task_between_dsqs() - Move a task from one DSQ to another
1819  * @sch: scx_sched being operated on
1820  * @p: target task
1821  * @enq_flags: %SCX_ENQ_*
1822  * @src_dsq: DSQ @p is currently on, must not be a local DSQ
1823  * @dst_dsq: DSQ @p is being moved to, can be any DSQ
1824  *
1825  * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local
1826  * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq
1827  * will change. As @p's task_rq is locked, this function doesn't need to use the
1828  * holding_cpu mechanism.
1829  *
1830  * On return, @src_dsq is unlocked and only @p's new task_rq, which is the
1831  * return value, is locked.
1832  */
1833 static struct rq *move_task_between_dsqs(struct scx_sched *sch,
1834 					 struct task_struct *p, u64 enq_flags,
1835 					 struct scx_dispatch_q *src_dsq,
1836 					 struct scx_dispatch_q *dst_dsq)
1837 {
1838 	struct rq *src_rq = task_rq(p), *dst_rq;
1839 
1840 	BUG_ON(src_dsq->id == SCX_DSQ_LOCAL);
1841 	lockdep_assert_held(&src_dsq->lock);
1842 	lockdep_assert_rq_held(src_rq);
1843 
1844 	if (dst_dsq->id == SCX_DSQ_LOCAL) {
1845 		dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
1846 		if (src_rq != dst_rq &&
1847 		    unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
1848 			dst_dsq = find_global_dsq(sch, p);
1849 			dst_rq = src_rq;
1850 		}
1851 	} else {
1852 		/* no need to migrate if destination is a non-local DSQ */
1853 		dst_rq = src_rq;
1854 	}
1855 
1856 	/*
1857 	 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
1858 	 * CPU, @p will be migrated.
1859 	 */
1860 	if (dst_dsq->id == SCX_DSQ_LOCAL) {
1861 		/* @p is going from a non-local DSQ to a local DSQ */
1862 		if (src_rq == dst_rq) {
1863 			task_unlink_from_dsq(p, src_dsq);
1864 			move_local_task_to_local_dsq(p, enq_flags,
1865 						     src_dsq, dst_rq);
1866 			raw_spin_unlock(&src_dsq->lock);
1867 		} else {
1868 			raw_spin_unlock(&src_dsq->lock);
1869 			move_remote_task_to_local_dsq(p, enq_flags,
1870 						      src_rq, dst_rq);
1871 		}
1872 	} else {
1873 		/*
1874 		 * @p is going from a non-local DSQ to a non-local DSQ. As
1875 		 * $src_dsq is already locked, do an abbreviated dequeue.
1876 		 */
1877 		dispatch_dequeue_locked(p, src_dsq);
1878 		raw_spin_unlock(&src_dsq->lock);
1879 
1880 		dispatch_enqueue(sch, dst_dsq, p, enq_flags);
1881 	}
1882 
1883 	return dst_rq;
1884 }
1885 
1886 static bool consume_dispatch_q(struct scx_sched *sch, struct rq *rq,
1887 			       struct scx_dispatch_q *dsq)
1888 {
1889 	struct task_struct *p;
1890 retry:
1891 	/*
1892 	 * The caller can't expect to successfully consume a task if the task's
1893 	 * addition to @dsq isn't guaranteed to be visible somehow. Test
1894 	 * @dsq->list without locking and skip if it seems empty.
1895 	 */
1896 	if (list_empty(&dsq->list))
1897 		return false;
1898 
1899 	raw_spin_lock(&dsq->lock);
1900 
1901 	nldsq_for_each_task(p, dsq) {
1902 		struct rq *task_rq = task_rq(p);
1903 
1904 		/*
1905 		 * This loop can lead to multiple lockup scenarios, e.g. the BPF
1906 		 * scheduler can put an enormous number of affinitized tasks into
1907 		 * a contended DSQ, or the outer retry loop can repeatedly race
1908 		 * against scx_bypass() dequeueing tasks from @dsq trying to put
1909 		 * the system into the bypass mode. This can easily live-lock the
1910 		 * machine. If aborting, exit from all non-bypass DSQs.
1911 		 */
1912 		if (unlikely(READ_ONCE(scx_aborting)) && dsq->id != SCX_DSQ_BYPASS)
1913 			break;
1914 
1915 		if (rq == task_rq) {
1916 			task_unlink_from_dsq(p, dsq);
1917 			move_local_task_to_local_dsq(p, 0, dsq, rq);
1918 			raw_spin_unlock(&dsq->lock);
1919 			return true;
1920 		}
1921 
1922 		if (task_can_run_on_remote_rq(sch, p, rq, false)) {
1923 			if (likely(consume_remote_task(rq, p, dsq, task_rq)))
1924 				return true;
1925 			goto retry;
1926 		}
1927 	}
1928 
1929 	raw_spin_unlock(&dsq->lock);
1930 	return false;
1931 }
1932 
1933 static bool consume_global_dsq(struct scx_sched *sch, struct rq *rq)
1934 {
1935 	int node = cpu_to_node(cpu_of(rq));
1936 
1937 	return consume_dispatch_q(sch, rq, sch->global_dsqs[node]);
1938 }
1939 
1940 /**
1941  * dispatch_to_local_dsq - Dispatch a task to a local dsq
1942  * @sch: scx_sched being operated on
1943  * @rq: current rq which is locked
1944  * @dst_dsq: destination DSQ
1945  * @p: task to dispatch
1946  * @enq_flags: %SCX_ENQ_*
1947  *
1948  * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
1949  * DSQ. This function performs all the synchronization dancing needed because
1950  * local DSQs are protected with rq locks.
1951  *
1952  * The caller must have exclusive ownership of @p (e.g. through
1953  * %SCX_OPSS_DISPATCHING).
1954  */
1955 static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
1956 				  struct scx_dispatch_q *dst_dsq,
1957 				  struct task_struct *p, u64 enq_flags)
1958 {
1959 	struct rq *src_rq = task_rq(p);
1960 	struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
1961 	struct rq *locked_rq = rq;
1962 
1963 	/*
1964 	 * We're synchronized against dequeue through DISPATCHING. As @p can't
1965 	 * be dequeued, its task_rq and cpus_allowed are stable too.
1966 	 *
1967 	 * If dispatching to @rq that @p is already on, no lock dancing needed.
1968 	 */
1969 	if (rq == src_rq && rq == dst_rq) {
1970 		dispatch_enqueue(sch, dst_dsq, p,
1971 				 enq_flags | SCX_ENQ_CLEAR_OPSS);
1972 		return;
1973 	}
1974 
1975 	if (src_rq != dst_rq &&
1976 	    unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
1977 		dispatch_enqueue(sch, find_global_dsq(sch, p), p,
1978 				 enq_flags | SCX_ENQ_CLEAR_OPSS);
1979 		return;
1980 	}
1981 
1982 	/*
1983 	 * @p is on a possibly remote @src_rq which we need to lock to move the
1984 	 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
1985 	 * on DISPATCHING, so we can't grab @src_rq lock while holding
1986 	 * DISPATCHING.
1987 	 *
1988 	 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
1989 	 * we're moving from a DSQ and use the same mechanism - mark the task
1990 	 * under transfer with holding_cpu, release DISPATCHING and then follow
1991 	 * the same protocol. See unlink_dsq_and_lock_src_rq().
1992 	 */
1993 	p->scx.holding_cpu = raw_smp_processor_id();
1994 
1995 	/* store_release ensures that dequeue sees the above */
1996 	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1997 
1998 	/* switch to @src_rq lock */
1999 	if (locked_rq != src_rq) {
2000 		raw_spin_rq_unlock(locked_rq);
2001 		locked_rq = src_rq;
2002 		raw_spin_rq_lock(src_rq);
2003 	}
2004 
2005 	/* task_rq couldn't have changed if we're still the holding cpu */
2006 	if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
2007 	    !WARN_ON_ONCE(src_rq != task_rq(p))) {
2008 		/*
2009 		 * If @p is staying on the same rq, there's no need to go
2010 		 * through the full deactivate/activate cycle. Optimize by
2011 		 * abbreviating move_remote_task_to_local_dsq().
2012 		 */
2013 		if (src_rq == dst_rq) {
2014 			p->scx.holding_cpu = -1;
2015 			dispatch_enqueue(sch, &dst_rq->scx.local_dsq, p,
2016 					 enq_flags);
2017 		} else {
2018 			move_remote_task_to_local_dsq(p, enq_flags,
2019 						      src_rq, dst_rq);
2020 			/* task has been moved to dst_rq, which is now locked */
2021 			locked_rq = dst_rq;
2022 		}
2023 
2024 		/* if the destination CPU is idle, wake it up */
2025 		if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
2026 			resched_curr(dst_rq);
2027 	}
2028 
2029 	/* switch back to @rq lock */
2030 	if (locked_rq != rq) {
2031 		raw_spin_rq_unlock(locked_rq);
2032 		raw_spin_rq_lock(rq);
2033 	}
2034 }
2035 
2036 /**
2037  * finish_dispatch - Asynchronously finish dispatching a task
2038  * @rq: current rq which is locked
2039  * @p: task to finish dispatching
2040  * @qseq_at_dispatch: qseq when @p started getting dispatched
2041  * @dsq_id: destination DSQ ID
2042  * @enq_flags: %SCX_ENQ_*
2043  *
2044  * Dispatching to local DSQs may need to wait for queueing to complete or
2045  * require rq lock dancing. As we don't wanna do either while inside
2046  * ops.dispatch() to avoid locking order inversion, we split dispatching into
2047  * two parts. scx_bpf_dsq_insert() which is called by ops.dispatch() records the
2048  * task and its qseq. Once ops.dispatch() returns, this function is called to
2049  * finish up.
2050  *
2051  * There is no guarantee that @p is still valid for dispatching or even that it
2052  * was valid in the first place. Make sure that the task is still owned by the
2053  * BPF scheduler and claim the ownership before dispatching.
2054  */
2055 static void finish_dispatch(struct scx_sched *sch, struct rq *rq,
2056 			    struct task_struct *p,
2057 			    unsigned long qseq_at_dispatch,
2058 			    u64 dsq_id, u64 enq_flags)
2059 {
2060 	struct scx_dispatch_q *dsq;
2061 	unsigned long opss;
2062 
2063 	touch_core_sched_dispatch(rq, p);
2064 retry:
2065 	/*
2066 	 * No need for _acquire here. @p is accessed only after a successful
2067 	 * try_cmpxchg to DISPATCHING.
2068 	 */
2069 	opss = atomic_long_read(&p->scx.ops_state);
2070 
2071 	switch (opss & SCX_OPSS_STATE_MASK) {
2072 	case SCX_OPSS_DISPATCHING:
2073 	case SCX_OPSS_NONE:
2074 		/* someone else already got to it */
2075 		return;
2076 	case SCX_OPSS_QUEUED:
2077 		/*
2078 		 * If qseq doesn't match, @p has gone through at least one
2079 		 * dispatch/dequeue and re-enqueue cycle between
2080 		 * scx_bpf_dsq_insert() and here and we have no claim on it.
2081 		 */
2082 		if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
2083 			return;
2084 
2085 		/*
2086 		 * While we know @p is accessible, we don't yet have a claim on
2087 		 * it - the BPF scheduler is allowed to dispatch tasks
2088 		 * spuriously and there can be a racing dequeue attempt. Let's
2089 		 * claim @p by atomically transitioning it from QUEUED to
2090 		 * DISPATCHING.
2091 		 */
2092 		if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2093 						   SCX_OPSS_DISPATCHING)))
2094 			break;
2095 		goto retry;
2096 	case SCX_OPSS_QUEUEING:
2097 		/*
2098 		 * do_enqueue_task() is in the process of transferring the task
2099 		 * to the BPF scheduler while holding @p's rq lock. As we aren't
2100 		 * holding any kernel or BPF resource that the enqueue path may
2101 		 * depend upon, it's safe to wait.
2102 		 */
2103 		wait_ops_state(p, opss);
2104 		goto retry;
2105 	}
2106 
2107 	BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
2108 
2109 	dsq = find_dsq_for_dispatch(sch, this_rq(), dsq_id, p);
2110 
2111 	if (dsq->id == SCX_DSQ_LOCAL)
2112 		dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags);
2113 	else
2114 		dispatch_enqueue(sch, dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2115 }
2116 
2117 static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq)
2118 {
2119 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2120 	u32 u;
2121 
2122 	for (u = 0; u < dspc->cursor; u++) {
2123 		struct scx_dsp_buf_ent *ent = &dspc->buf[u];
2124 
2125 		finish_dispatch(sch, rq, ent->task, ent->qseq, ent->dsq_id,
2126 				ent->enq_flags);
2127 	}
2128 
2129 	dspc->nr_tasks += dspc->cursor;
2130 	dspc->cursor = 0;
2131 }
2132 
2133 static inline void maybe_queue_balance_callback(struct rq *rq)
2134 {
2135 	lockdep_assert_rq_held(rq);
2136 
2137 	if (!(rq->scx.flags & SCX_RQ_BAL_CB_PENDING))
2138 		return;
2139 
2140 	queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
2141 				deferred_bal_cb_workfn);
2142 
2143 	rq->scx.flags &= ~SCX_RQ_BAL_CB_PENDING;
2144 }
2145 
2146 static int balance_one(struct rq *rq, struct task_struct *prev)
2147 {
2148 	struct scx_sched *sch = scx_root;
2149 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2150 	bool prev_on_scx = prev->sched_class == &ext_sched_class;
2151 	bool prev_on_rq = prev->scx.flags & SCX_TASK_QUEUED;
2152 	int nr_loops = SCX_DSP_MAX_LOOPS;
2153 
2154 	lockdep_assert_rq_held(rq);
2155 	rq->scx.flags |= SCX_RQ_IN_BALANCE;
2156 	rq->scx.flags &= ~SCX_RQ_BAL_KEEP;
2157 
2158 	if ((sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT) &&
2159 	    unlikely(rq->scx.cpu_released)) {
2160 		/*
2161 		 * If the previous sched_class for the current CPU was not SCX,
2162 		 * notify the BPF scheduler that it again has control of the
2163 		 * core. This callback complements ->cpu_release(), which is
2164 		 * emitted in switch_class().
2165 		 */
2166 		if (SCX_HAS_OP(sch, cpu_acquire))
2167 			SCX_CALL_OP(sch, SCX_KF_REST, cpu_acquire, rq,
2168 				    cpu_of(rq), NULL);
2169 		rq->scx.cpu_released = false;
2170 	}
2171 
2172 	if (prev_on_scx) {
2173 		update_curr_scx(rq);
2174 
2175 		/*
2176 		 * If @prev is runnable & has slice left, it has priority and
2177 		 * fetching more just increases latency for the fetched tasks.
2178 		 * Tell pick_task_scx() to keep running @prev. If the BPF
2179 		 * scheduler wants to handle this explicitly, it should
2180 		 * implement ->cpu_release().
2181 		 *
2182 		 * See scx_disable_workfn() for the explanation on the bypassing
2183 		 * test.
2184 		 */
2185 		if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) {
2186 			rq->scx.flags |= SCX_RQ_BAL_KEEP;
2187 			goto has_tasks;
2188 		}
2189 	}
2190 
2191 	/* if there already are tasks to run, nothing to do */
2192 	if (rq->scx.local_dsq.nr)
2193 		goto has_tasks;
2194 
2195 	if (consume_global_dsq(sch, rq))
2196 		goto has_tasks;
2197 
2198 	if (scx_rq_bypassing(rq)) {
2199 		if (consume_dispatch_q(sch, rq, &rq->scx.bypass_dsq))
2200 			goto has_tasks;
2201 		else
2202 			goto no_tasks;
2203 	}
2204 
2205 	if (unlikely(!SCX_HAS_OP(sch, dispatch)) || !scx_rq_online(rq))
2206 		goto no_tasks;
2207 
2208 	dspc->rq = rq;
2209 
2210 	/*
2211 	 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock,
2212 	 * the local DSQ might still end up empty after a successful
2213 	 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch()
2214 	 * produced some tasks, retry. The BPF scheduler may depend on this
2215 	 * looping behavior to simplify its implementation.
2216 	 */
2217 	do {
2218 		dspc->nr_tasks = 0;
2219 
2220 		SCX_CALL_OP(sch, SCX_KF_DISPATCH, dispatch, rq,
2221 			    cpu_of(rq), prev_on_scx ? prev : NULL);
2222 
2223 		flush_dispatch_buf(sch, rq);
2224 
2225 		if (prev_on_rq && prev->scx.slice) {
2226 			rq->scx.flags |= SCX_RQ_BAL_KEEP;
2227 			goto has_tasks;
2228 		}
2229 		if (rq->scx.local_dsq.nr)
2230 			goto has_tasks;
2231 		if (consume_global_dsq(sch, rq))
2232 			goto has_tasks;
2233 
2234 		/*
2235 		 * ops.dispatch() can trap us in this loop by repeatedly
2236 		 * dispatching ineligible tasks. Break out once in a while to
2237 		 * allow the watchdog to run. As IRQ can't be enabled in
2238 		 * balance(), we want to complete this scheduling cycle and then
2239 		 * start a new one. IOW, we want to call resched_curr() on the
2240 		 * next, most likely idle, task, not the current one. Use
2241 		 * scx_kick_cpu() for deferred kicking.
2242 		 */
2243 		if (unlikely(!--nr_loops)) {
2244 			scx_kick_cpu(sch, cpu_of(rq), 0);
2245 			break;
2246 		}
2247 	} while (dspc->nr_tasks);
2248 
2249 no_tasks:
2250 	/*
2251 	 * Didn't find another task to run. Keep running @prev unless
2252 	 * %SCX_OPS_ENQ_LAST is in effect.
2253 	 */
2254 	if (prev_on_rq &&
2255 	    (!(sch->ops.flags & SCX_OPS_ENQ_LAST) || scx_rq_bypassing(rq))) {
2256 		rq->scx.flags |= SCX_RQ_BAL_KEEP;
2257 		__scx_add_event(sch, SCX_EV_DISPATCH_KEEP_LAST, 1);
2258 		goto has_tasks;
2259 	}
2260 	rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2261 	return false;
2262 
2263 has_tasks:
2264 	rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2265 	return true;
2266 }
2267 
2268 static void process_ddsp_deferred_locals(struct rq *rq)
2269 {
2270 	struct task_struct *p;
2271 
2272 	lockdep_assert_rq_held(rq);
2273 
2274 	/*
2275 	 * Now that @rq can be unlocked, execute the deferred enqueueing of
2276 	 * tasks directly dispatched to the local DSQs of other CPUs. See
2277 	 * direct_dispatch(). Keep popping from the head instead of using
2278 	 * list_for_each_entry_safe() as dispatch_local_dsq() may unlock @rq
2279 	 * temporarily.
2280 	 */
2281 	while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
2282 				struct task_struct, scx.dsq_list.node))) {
2283 		struct scx_sched *sch = scx_root;
2284 		struct scx_dispatch_q *dsq;
2285 
2286 		list_del_init(&p->scx.dsq_list.node);
2287 
2288 		dsq = find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
2289 		if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
2290 			dispatch_to_local_dsq(sch, rq, dsq, p,
2291 					      p->scx.ddsp_enq_flags);
2292 	}
2293 }
2294 
2295 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
2296 {
2297 	struct scx_sched *sch = scx_root;
2298 
2299 	if (p->scx.flags & SCX_TASK_QUEUED) {
2300 		/*
2301 		 * Core-sched might decide to execute @p before it is
2302 		 * dispatched. Call ops_dequeue() to notify the BPF scheduler.
2303 		 */
2304 		ops_dequeue(rq, p, SCX_DEQ_CORE_SCHED_EXEC);
2305 		dispatch_dequeue(rq, p);
2306 	}
2307 
2308 	p->se.exec_start = rq_clock_task(rq);
2309 
2310 	/* see dequeue_task_scx() on why we skip when !QUEUED */
2311 	if (SCX_HAS_OP(sch, running) && (p->scx.flags & SCX_TASK_QUEUED))
2312 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, running, rq, p);
2313 
2314 	clr_task_runnable(p, true);
2315 
2316 	/*
2317 	 * @p is getting newly scheduled or got kicked after someone updated its
2318 	 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
2319 	 */
2320 	if ((p->scx.slice == SCX_SLICE_INF) !=
2321 	    (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
2322 		if (p->scx.slice == SCX_SLICE_INF)
2323 			rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
2324 		else
2325 			rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
2326 
2327 		sched_update_tick_dependency(rq);
2328 
2329 		/*
2330 		 * For now, let's refresh the load_avgs just when transitioning
2331 		 * in and out of nohz. In the future, we might want to add a
2332 		 * mechanism which calls the following periodically on
2333 		 * tick-stopped CPUs.
2334 		 */
2335 		update_other_load_avgs(rq);
2336 	}
2337 }
2338 
2339 static enum scx_cpu_preempt_reason
2340 preempt_reason_from_class(const struct sched_class *class)
2341 {
2342 	if (class == &stop_sched_class)
2343 		return SCX_CPU_PREEMPT_STOP;
2344 	if (class == &dl_sched_class)
2345 		return SCX_CPU_PREEMPT_DL;
2346 	if (class == &rt_sched_class)
2347 		return SCX_CPU_PREEMPT_RT;
2348 	return SCX_CPU_PREEMPT_UNKNOWN;
2349 }
2350 
2351 static void switch_class(struct rq *rq, struct task_struct *next)
2352 {
2353 	struct scx_sched *sch = scx_root;
2354 	const struct sched_class *next_class = next->sched_class;
2355 
2356 	if (!(sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT))
2357 		return;
2358 
2359 	/*
2360 	 * The callback is conceptually meant to convey that the CPU is no
2361 	 * longer under the control of SCX. Therefore, don't invoke the callback
2362 	 * if the next class is below SCX (in which case the BPF scheduler has
2363 	 * actively decided not to schedule any tasks on the CPU).
2364 	 */
2365 	if (sched_class_above(&ext_sched_class, next_class))
2366 		return;
2367 
2368 	/*
2369 	 * At this point we know that SCX was preempted by a higher priority
2370 	 * sched_class, so invoke the ->cpu_release() callback if we have not
2371 	 * done so already. We only send the callback once between SCX being
2372 	 * preempted, and it regaining control of the CPU.
2373 	 *
2374 	 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
2375 	 *  next time that balance_scx() is invoked.
2376 	 */
2377 	if (!rq->scx.cpu_released) {
2378 		if (SCX_HAS_OP(sch, cpu_release)) {
2379 			struct scx_cpu_release_args args = {
2380 				.reason = preempt_reason_from_class(next_class),
2381 				.task = next,
2382 			};
2383 
2384 			SCX_CALL_OP(sch, SCX_KF_CPU_RELEASE, cpu_release, rq,
2385 				    cpu_of(rq), &args);
2386 		}
2387 		rq->scx.cpu_released = true;
2388 	}
2389 }
2390 
2391 static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
2392 			      struct task_struct *next)
2393 {
2394 	struct scx_sched *sch = scx_root;
2395 
2396 	/* see kick_cpus_irq_workfn() */
2397 	smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1);
2398 
2399 	update_curr_scx(rq);
2400 
2401 	/* see dequeue_task_scx() on why we skip when !QUEUED */
2402 	if (SCX_HAS_OP(sch, stopping) && (p->scx.flags & SCX_TASK_QUEUED))
2403 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, true);
2404 
2405 	if (p->scx.flags & SCX_TASK_QUEUED) {
2406 		set_task_runnable(rq, p);
2407 
2408 		/*
2409 		 * If @p has slice left and is being put, @p is getting
2410 		 * preempted by a higher priority scheduler class or core-sched
2411 		 * forcing a different task. Leave it at the head of the local
2412 		 * DSQ.
2413 		 */
2414 		if (p->scx.slice && !scx_rq_bypassing(rq)) {
2415 			dispatch_enqueue(sch, &rq->scx.local_dsq, p,
2416 					 SCX_ENQ_HEAD);
2417 			goto switch_class;
2418 		}
2419 
2420 		/*
2421 		 * If @p is runnable but we're about to enter a lower
2422 		 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell
2423 		 * ops.enqueue() that @p is the only one available for this cpu,
2424 		 * which should trigger an explicit follow-up scheduling event.
2425 		 */
2426 		if (next && sched_class_above(&ext_sched_class, next->sched_class)) {
2427 			WARN_ON_ONCE(!(sch->ops.flags & SCX_OPS_ENQ_LAST));
2428 			do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
2429 		} else {
2430 			do_enqueue_task(rq, p, 0, -1);
2431 		}
2432 	}
2433 
2434 switch_class:
2435 	if (next && next->sched_class != &ext_sched_class)
2436 		switch_class(rq, next);
2437 }
2438 
2439 static struct task_struct *first_local_task(struct rq *rq)
2440 {
2441 	return list_first_entry_or_null(&rq->scx.local_dsq.list,
2442 					struct task_struct, scx.dsq_list.node);
2443 }
2444 
2445 static struct task_struct *
2446 do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx)
2447 {
2448 	struct task_struct *prev = rq->curr;
2449 	bool keep_prev;
2450 	struct task_struct *p;
2451 
2452 	/* see kick_cpus_irq_workfn() */
2453 	smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1);
2454 
2455 	rq_modified_clear(rq);
2456 
2457 	rq_unpin_lock(rq, rf);
2458 	balance_one(rq, prev);
2459 	rq_repin_lock(rq, rf);
2460 	maybe_queue_balance_callback(rq);
2461 
2462 	/*
2463 	 * If any higher-priority sched class enqueued a runnable task on
2464 	 * this rq during balance_one(), abort and return RETRY_TASK, so
2465 	 * that the scheduler loop can restart.
2466 	 *
2467 	 * If @force_scx is true, always try to pick a SCHED_EXT task,
2468 	 * regardless of any higher-priority sched classes activity.
2469 	 */
2470 	if (!force_scx && rq_modified_above(rq, &ext_sched_class))
2471 		return RETRY_TASK;
2472 
2473 	keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
2474 	if (unlikely(keep_prev &&
2475 		     prev->sched_class != &ext_sched_class)) {
2476 		WARN_ON_ONCE(scx_enable_state() == SCX_ENABLED);
2477 		keep_prev = false;
2478 	}
2479 
2480 	/*
2481 	 * If balance_scx() is telling us to keep running @prev, replenish slice
2482 	 * if necessary and keep running @prev. Otherwise, pop the first one
2483 	 * from the local DSQ.
2484 	 */
2485 	if (keep_prev) {
2486 		p = prev;
2487 		if (!p->scx.slice)
2488 			refill_task_slice_dfl(rcu_dereference_sched(scx_root), p);
2489 	} else {
2490 		p = first_local_task(rq);
2491 		if (!p)
2492 			return NULL;
2493 
2494 		if (unlikely(!p->scx.slice)) {
2495 			struct scx_sched *sch = rcu_dereference_sched(scx_root);
2496 
2497 			if (!scx_rq_bypassing(rq) && !sch->warned_zero_slice) {
2498 				printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n",
2499 						p->comm, p->pid, __func__);
2500 				sch->warned_zero_slice = true;
2501 			}
2502 			refill_task_slice_dfl(sch, p);
2503 		}
2504 	}
2505 
2506 	return p;
2507 }
2508 
2509 static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf)
2510 {
2511 	return do_pick_task_scx(rq, rf, false);
2512 }
2513 
2514 #ifdef CONFIG_SCHED_CORE
2515 /**
2516  * scx_prio_less - Task ordering for core-sched
2517  * @a: task A
2518  * @b: task B
2519  * @in_fi: in forced idle state
2520  *
2521  * Core-sched is implemented as an additional scheduling layer on top of the
2522  * usual sched_class'es and needs to find out the expected task ordering. For
2523  * SCX, core-sched calls this function to interrogate the task ordering.
2524  *
2525  * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
2526  * to implement the default task ordering. The older the timestamp, the higher
2527  * priority the task - the global FIFO ordering matching the default scheduling
2528  * behavior.
2529  *
2530  * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
2531  * implement FIFO ordering within each local DSQ. See pick_task_scx().
2532  */
2533 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
2534 		   bool in_fi)
2535 {
2536 	struct scx_sched *sch = scx_root;
2537 
2538 	/*
2539 	 * The const qualifiers are dropped from task_struct pointers when
2540 	 * calling ops.core_sched_before(). Accesses are controlled by the
2541 	 * verifier.
2542 	 */
2543 	if (SCX_HAS_OP(sch, core_sched_before) &&
2544 	    !scx_rq_bypassing(task_rq(a)))
2545 		return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, core_sched_before,
2546 					      NULL,
2547 					      (struct task_struct *)a,
2548 					      (struct task_struct *)b);
2549 	else
2550 		return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
2551 }
2552 #endif	/* CONFIG_SCHED_CORE */
2553 
2554 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
2555 {
2556 	struct scx_sched *sch = scx_root;
2557 	bool rq_bypass;
2558 
2559 	/*
2560 	 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it
2561 	 * can be a good migration opportunity with low cache and memory
2562 	 * footprint. Returning a CPU different than @prev_cpu triggers
2563 	 * immediate rq migration. However, for SCX, as the current rq
2564 	 * association doesn't dictate where the task is going to run, this
2565 	 * doesn't fit well. If necessary, we can later add a dedicated method
2566 	 * which can decide to preempt self to force it through the regular
2567 	 * scheduling path.
2568 	 */
2569 	if (unlikely(wake_flags & WF_EXEC))
2570 		return prev_cpu;
2571 
2572 	rq_bypass = scx_rq_bypassing(task_rq(p));
2573 	if (likely(SCX_HAS_OP(sch, select_cpu)) && !rq_bypass) {
2574 		s32 cpu;
2575 		struct task_struct **ddsp_taskp;
2576 
2577 		ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
2578 		WARN_ON_ONCE(*ddsp_taskp);
2579 		*ddsp_taskp = p;
2580 
2581 		cpu = SCX_CALL_OP_TASK_RET(sch,
2582 					   SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
2583 					   select_cpu, NULL, p, prev_cpu,
2584 					   wake_flags);
2585 		p->scx.selected_cpu = cpu;
2586 		*ddsp_taskp = NULL;
2587 		if (ops_cpu_valid(sch, cpu, "from ops.select_cpu()"))
2588 			return cpu;
2589 		else
2590 			return prev_cpu;
2591 	} else {
2592 		s32 cpu;
2593 
2594 		cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0);
2595 		if (cpu >= 0) {
2596 			refill_task_slice_dfl(sch, p);
2597 			p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
2598 		} else {
2599 			cpu = prev_cpu;
2600 		}
2601 		p->scx.selected_cpu = cpu;
2602 
2603 		if (rq_bypass)
2604 			__scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);
2605 		return cpu;
2606 	}
2607 }
2608 
2609 static void task_woken_scx(struct rq *rq, struct task_struct *p)
2610 {
2611 	run_deferred(rq);
2612 }
2613 
2614 static void set_cpus_allowed_scx(struct task_struct *p,
2615 				 struct affinity_context *ac)
2616 {
2617 	struct scx_sched *sch = scx_root;
2618 
2619 	set_cpus_allowed_common(p, ac);
2620 
2621 	/*
2622 	 * The effective cpumask is stored in @p->cpus_ptr which may temporarily
2623 	 * differ from the configured one in @p->cpus_mask. Always tell the bpf
2624 	 * scheduler the effective one.
2625 	 *
2626 	 * Fine-grained memory write control is enforced by BPF making the const
2627 	 * designation pointless. Cast it away when calling the operation.
2628 	 */
2629 	if (SCX_HAS_OP(sch, set_cpumask))
2630 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, NULL,
2631 				 p, (struct cpumask *)p->cpus_ptr);
2632 }
2633 
2634 static void handle_hotplug(struct rq *rq, bool online)
2635 {
2636 	struct scx_sched *sch = scx_root;
2637 	int cpu = cpu_of(rq);
2638 
2639 	atomic_long_inc(&scx_hotplug_seq);
2640 
2641 	/*
2642 	 * scx_root updates are protected by cpus_read_lock() and will stay
2643 	 * stable here. Note that we can't depend on scx_enabled() test as the
2644 	 * hotplug ops need to be enabled before __scx_enabled is set.
2645 	 */
2646 	if (unlikely(!sch))
2647 		return;
2648 
2649 	if (scx_enabled())
2650 		scx_idle_update_selcpu_topology(&sch->ops);
2651 
2652 	if (online && SCX_HAS_OP(sch, cpu_online))
2653 		SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_online, NULL, cpu);
2654 	else if (!online && SCX_HAS_OP(sch, cpu_offline))
2655 		SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_offline, NULL, cpu);
2656 	else
2657 		scx_exit(sch, SCX_EXIT_UNREG_KERN,
2658 			 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
2659 			 "cpu %d going %s, exiting scheduler", cpu,
2660 			 online ? "online" : "offline");
2661 }
2662 
2663 void scx_rq_activate(struct rq *rq)
2664 {
2665 	handle_hotplug(rq, true);
2666 }
2667 
2668 void scx_rq_deactivate(struct rq *rq)
2669 {
2670 	handle_hotplug(rq, false);
2671 }
2672 
2673 static void rq_online_scx(struct rq *rq)
2674 {
2675 	rq->scx.flags |= SCX_RQ_ONLINE;
2676 }
2677 
2678 static void rq_offline_scx(struct rq *rq)
2679 {
2680 	rq->scx.flags &= ~SCX_RQ_ONLINE;
2681 }
2682 
2683 
2684 static bool check_rq_for_timeouts(struct rq *rq)
2685 {
2686 	struct scx_sched *sch;
2687 	struct task_struct *p;
2688 	struct rq_flags rf;
2689 	bool timed_out = false;
2690 
2691 	rq_lock_irqsave(rq, &rf);
2692 	sch = rcu_dereference_bh(scx_root);
2693 	if (unlikely(!sch))
2694 		goto out_unlock;
2695 
2696 	list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
2697 		unsigned long last_runnable = p->scx.runnable_at;
2698 
2699 		if (unlikely(time_after(jiffies,
2700 					last_runnable + scx_watchdog_timeout))) {
2701 			u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
2702 
2703 			scx_exit(sch, SCX_EXIT_ERROR_STALL, 0,
2704 				 "%s[%d] failed to run for %u.%03us",
2705 				 p->comm, p->pid, dur_ms / 1000, dur_ms % 1000);
2706 			timed_out = true;
2707 			break;
2708 		}
2709 	}
2710 out_unlock:
2711 	rq_unlock_irqrestore(rq, &rf);
2712 	return timed_out;
2713 }
2714 
2715 static void scx_watchdog_workfn(struct work_struct *work)
2716 {
2717 	int cpu;
2718 
2719 	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
2720 
2721 	for_each_online_cpu(cpu) {
2722 		if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
2723 			break;
2724 
2725 		cond_resched();
2726 	}
2727 	queue_delayed_work(system_unbound_wq, to_delayed_work(work),
2728 			   scx_watchdog_timeout / 2);
2729 }
2730 
2731 void scx_tick(struct rq *rq)
2732 {
2733 	struct scx_sched *sch;
2734 	unsigned long last_check;
2735 
2736 	if (!scx_enabled())
2737 		return;
2738 
2739 	sch = rcu_dereference_bh(scx_root);
2740 	if (unlikely(!sch))
2741 		return;
2742 
2743 	last_check = READ_ONCE(scx_watchdog_timestamp);
2744 	if (unlikely(time_after(jiffies,
2745 				last_check + READ_ONCE(scx_watchdog_timeout)))) {
2746 		u32 dur_ms = jiffies_to_msecs(jiffies - last_check);
2747 
2748 		scx_exit(sch, SCX_EXIT_ERROR_STALL, 0,
2749 			 "watchdog failed to check in for %u.%03us",
2750 			 dur_ms / 1000, dur_ms % 1000);
2751 	}
2752 
2753 	update_other_load_avgs(rq);
2754 }
2755 
2756 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
2757 {
2758 	struct scx_sched *sch = scx_root;
2759 
2760 	update_curr_scx(rq);
2761 
2762 	/*
2763 	 * While disabling, always resched and refresh core-sched timestamp as
2764 	 * we can't trust the slice management or ops.core_sched_before().
2765 	 */
2766 	if (scx_rq_bypassing(rq)) {
2767 		curr->scx.slice = 0;
2768 		touch_core_sched(rq, curr);
2769 	} else if (SCX_HAS_OP(sch, tick)) {
2770 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, tick, rq, curr);
2771 	}
2772 
2773 	if (!curr->scx.slice)
2774 		resched_curr(rq);
2775 }
2776 
2777 #ifdef CONFIG_EXT_GROUP_SCHED
2778 static struct cgroup *tg_cgrp(struct task_group *tg)
2779 {
2780 	/*
2781 	 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup,
2782 	 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the
2783 	 * root cgroup.
2784 	 */
2785 	if (tg && tg->css.cgroup)
2786 		return tg->css.cgroup;
2787 	else
2788 		return &cgrp_dfl_root.cgrp;
2789 }
2790 
2791 #define SCX_INIT_TASK_ARGS_CGROUP(tg)		.cgroup = tg_cgrp(tg),
2792 
2793 #else	/* CONFIG_EXT_GROUP_SCHED */
2794 
2795 #define SCX_INIT_TASK_ARGS_CGROUP(tg)
2796 
2797 #endif	/* CONFIG_EXT_GROUP_SCHED */
2798 
2799 static enum scx_task_state scx_get_task_state(const struct task_struct *p)
2800 {
2801 	return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
2802 }
2803 
2804 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
2805 {
2806 	enum scx_task_state prev_state = scx_get_task_state(p);
2807 	bool warn = false;
2808 
2809 	BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS));
2810 
2811 	switch (state) {
2812 	case SCX_TASK_NONE:
2813 		break;
2814 	case SCX_TASK_INIT:
2815 		warn = prev_state != SCX_TASK_NONE;
2816 		break;
2817 	case SCX_TASK_READY:
2818 		warn = prev_state == SCX_TASK_NONE;
2819 		break;
2820 	case SCX_TASK_ENABLED:
2821 		warn = prev_state != SCX_TASK_READY;
2822 		break;
2823 	default:
2824 		warn = true;
2825 		return;
2826 	}
2827 
2828 	WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
2829 		  prev_state, state, p->comm, p->pid);
2830 
2831 	p->scx.flags &= ~SCX_TASK_STATE_MASK;
2832 	p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
2833 }
2834 
2835 static int scx_init_task(struct task_struct *p, struct task_group *tg, bool fork)
2836 {
2837 	struct scx_sched *sch = scx_root;
2838 	int ret;
2839 
2840 	p->scx.disallow = false;
2841 
2842 	if (SCX_HAS_OP(sch, init_task)) {
2843 		struct scx_init_task_args args = {
2844 			SCX_INIT_TASK_ARGS_CGROUP(tg)
2845 			.fork = fork,
2846 		};
2847 
2848 		ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init_task, NULL,
2849 				      p, &args);
2850 		if (unlikely(ret)) {
2851 			ret = ops_sanitize_err(sch, "init_task", ret);
2852 			return ret;
2853 		}
2854 	}
2855 
2856 	scx_set_task_state(p, SCX_TASK_INIT);
2857 
2858 	if (p->scx.disallow) {
2859 		if (!fork) {
2860 			struct rq *rq;
2861 			struct rq_flags rf;
2862 
2863 			rq = task_rq_lock(p, &rf);
2864 
2865 			/*
2866 			 * We're in the load path and @p->policy will be applied
2867 			 * right after. Reverting @p->policy here and rejecting
2868 			 * %SCHED_EXT transitions from scx_check_setscheduler()
2869 			 * guarantees that if ops.init_task() sets @p->disallow,
2870 			 * @p can never be in SCX.
2871 			 */
2872 			if (p->policy == SCHED_EXT) {
2873 				p->policy = SCHED_NORMAL;
2874 				atomic_long_inc(&scx_nr_rejected);
2875 			}
2876 
2877 			task_rq_unlock(rq, p, &rf);
2878 		} else if (p->policy == SCHED_EXT) {
2879 			scx_error(sch, "ops.init_task() set task->scx.disallow for %s[%d] during fork",
2880 				  p->comm, p->pid);
2881 		}
2882 	}
2883 
2884 	p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
2885 	return 0;
2886 }
2887 
2888 static void scx_enable_task(struct task_struct *p)
2889 {
2890 	struct scx_sched *sch = scx_root;
2891 	struct rq *rq = task_rq(p);
2892 	u32 weight;
2893 
2894 	lockdep_assert_rq_held(rq);
2895 
2896 	/*
2897 	 * Set the weight before calling ops.enable() so that the scheduler
2898 	 * doesn't see a stale value if they inspect the task struct.
2899 	 */
2900 	if (task_has_idle_policy(p))
2901 		weight = WEIGHT_IDLEPRIO;
2902 	else
2903 		weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
2904 
2905 	p->scx.weight = sched_weight_to_cgroup(weight);
2906 
2907 	if (SCX_HAS_OP(sch, enable))
2908 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, enable, rq, p);
2909 	scx_set_task_state(p, SCX_TASK_ENABLED);
2910 
2911 	if (SCX_HAS_OP(sch, set_weight))
2912 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq,
2913 				 p, p->scx.weight);
2914 }
2915 
2916 static void scx_disable_task(struct task_struct *p)
2917 {
2918 	struct scx_sched *sch = scx_root;
2919 	struct rq *rq = task_rq(p);
2920 
2921 	lockdep_assert_rq_held(rq);
2922 	WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
2923 
2924 	if (SCX_HAS_OP(sch, disable))
2925 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, disable, rq, p);
2926 	scx_set_task_state(p, SCX_TASK_READY);
2927 }
2928 
2929 static void scx_exit_task(struct task_struct *p)
2930 {
2931 	struct scx_sched *sch = scx_root;
2932 	struct scx_exit_task_args args = {
2933 		.cancelled = false,
2934 	};
2935 
2936 	lockdep_assert_rq_held(task_rq(p));
2937 
2938 	switch (scx_get_task_state(p)) {
2939 	case SCX_TASK_NONE:
2940 		return;
2941 	case SCX_TASK_INIT:
2942 		args.cancelled = true;
2943 		break;
2944 	case SCX_TASK_READY:
2945 		break;
2946 	case SCX_TASK_ENABLED:
2947 		scx_disable_task(p);
2948 		break;
2949 	default:
2950 		WARN_ON_ONCE(true);
2951 		return;
2952 	}
2953 
2954 	if (SCX_HAS_OP(sch, exit_task))
2955 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, exit_task, task_rq(p),
2956 				 p, &args);
2957 	scx_set_task_state(p, SCX_TASK_NONE);
2958 }
2959 
2960 void init_scx_entity(struct sched_ext_entity *scx)
2961 {
2962 	memset(scx, 0, sizeof(*scx));
2963 	INIT_LIST_HEAD(&scx->dsq_list.node);
2964 	RB_CLEAR_NODE(&scx->dsq_priq);
2965 	scx->sticky_cpu = -1;
2966 	scx->holding_cpu = -1;
2967 	INIT_LIST_HEAD(&scx->runnable_node);
2968 	scx->runnable_at = jiffies;
2969 	scx->ddsp_dsq_id = SCX_DSQ_INVALID;
2970 	scx->slice = READ_ONCE(scx_slice_dfl);
2971 }
2972 
2973 void scx_pre_fork(struct task_struct *p)
2974 {
2975 	/*
2976 	 * BPF scheduler enable/disable paths want to be able to iterate and
2977 	 * update all tasks which can become complex when racing forks. As
2978 	 * enable/disable are very cold paths, let's use a percpu_rwsem to
2979 	 * exclude forks.
2980 	 */
2981 	percpu_down_read(&scx_fork_rwsem);
2982 }
2983 
2984 int scx_fork(struct task_struct *p)
2985 {
2986 	percpu_rwsem_assert_held(&scx_fork_rwsem);
2987 
2988 	if (scx_init_task_enabled)
2989 		return scx_init_task(p, task_group(p), true);
2990 	else
2991 		return 0;
2992 }
2993 
2994 void scx_post_fork(struct task_struct *p)
2995 {
2996 	if (scx_init_task_enabled) {
2997 		scx_set_task_state(p, SCX_TASK_READY);
2998 
2999 		/*
3000 		 * Enable the task immediately if it's running on sched_ext.
3001 		 * Otherwise, it'll be enabled in switching_to_scx() if and
3002 		 * when it's ever configured to run with a SCHED_EXT policy.
3003 		 */
3004 		if (p->sched_class == &ext_sched_class) {
3005 			struct rq_flags rf;
3006 			struct rq *rq;
3007 
3008 			rq = task_rq_lock(p, &rf);
3009 			scx_enable_task(p);
3010 			task_rq_unlock(rq, p, &rf);
3011 		}
3012 	}
3013 
3014 	raw_spin_lock_irq(&scx_tasks_lock);
3015 	list_add_tail(&p->scx.tasks_node, &scx_tasks);
3016 	raw_spin_unlock_irq(&scx_tasks_lock);
3017 
3018 	percpu_up_read(&scx_fork_rwsem);
3019 }
3020 
3021 void scx_cancel_fork(struct task_struct *p)
3022 {
3023 	if (scx_enabled()) {
3024 		struct rq *rq;
3025 		struct rq_flags rf;
3026 
3027 		rq = task_rq_lock(p, &rf);
3028 		WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
3029 		scx_exit_task(p);
3030 		task_rq_unlock(rq, p, &rf);
3031 	}
3032 
3033 	percpu_up_read(&scx_fork_rwsem);
3034 }
3035 
3036 void sched_ext_dead(struct task_struct *p)
3037 {
3038 	unsigned long flags;
3039 
3040 	raw_spin_lock_irqsave(&scx_tasks_lock, flags);
3041 	list_del_init(&p->scx.tasks_node);
3042 	raw_spin_unlock_irqrestore(&scx_tasks_lock, flags);
3043 
3044 	/*
3045 	 * @p is off scx_tasks and wholly ours. scx_enable()'s READY -> ENABLED
3046 	 * transitions can't race us. Disable ops for @p.
3047 	 */
3048 	if (scx_get_task_state(p) != SCX_TASK_NONE) {
3049 		struct rq_flags rf;
3050 		struct rq *rq;
3051 
3052 		rq = task_rq_lock(p, &rf);
3053 		scx_exit_task(p);
3054 		task_rq_unlock(rq, p, &rf);
3055 	}
3056 }
3057 
3058 static void reweight_task_scx(struct rq *rq, struct task_struct *p,
3059 			      const struct load_weight *lw)
3060 {
3061 	struct scx_sched *sch = scx_root;
3062 
3063 	lockdep_assert_rq_held(task_rq(p));
3064 
3065 	p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
3066 	if (SCX_HAS_OP(sch, set_weight))
3067 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq,
3068 				 p, p->scx.weight);
3069 }
3070 
3071 static void prio_changed_scx(struct rq *rq, struct task_struct *p, u64 oldprio)
3072 {
3073 }
3074 
3075 static void switching_to_scx(struct rq *rq, struct task_struct *p)
3076 {
3077 	struct scx_sched *sch = scx_root;
3078 
3079 	scx_enable_task(p);
3080 
3081 	/*
3082 	 * set_cpus_allowed_scx() is not called while @p is associated with a
3083 	 * different scheduler class. Keep the BPF scheduler up-to-date.
3084 	 */
3085 	if (SCX_HAS_OP(sch, set_cpumask))
3086 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, rq,
3087 				 p, (struct cpumask *)p->cpus_ptr);
3088 }
3089 
3090 static void switched_from_scx(struct rq *rq, struct task_struct *p)
3091 {
3092 	scx_disable_task(p);
3093 }
3094 
3095 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p,int wake_flags) {}
3096 static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
3097 
3098 int scx_check_setscheduler(struct task_struct *p, int policy)
3099 {
3100 	lockdep_assert_rq_held(task_rq(p));
3101 
3102 	/* if disallow, reject transitioning into SCX */
3103 	if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
3104 	    p->policy != policy && policy == SCHED_EXT)
3105 		return -EACCES;
3106 
3107 	return 0;
3108 }
3109 
3110 #ifdef CONFIG_NO_HZ_FULL
3111 bool scx_can_stop_tick(struct rq *rq)
3112 {
3113 	struct task_struct *p = rq->curr;
3114 
3115 	if (scx_rq_bypassing(rq))
3116 		return false;
3117 
3118 	if (p->sched_class != &ext_sched_class)
3119 		return true;
3120 
3121 	/*
3122 	 * @rq can dispatch from different DSQs, so we can't tell whether it
3123 	 * needs the tick or not by looking at nr_running. Allow stopping ticks
3124 	 * iff the BPF scheduler indicated so. See set_next_task_scx().
3125 	 */
3126 	return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
3127 }
3128 #endif
3129 
3130 #ifdef CONFIG_EXT_GROUP_SCHED
3131 
3132 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_ops_rwsem);
3133 static bool scx_cgroup_enabled;
3134 
3135 void scx_tg_init(struct task_group *tg)
3136 {
3137 	tg->scx.weight = CGROUP_WEIGHT_DFL;
3138 	tg->scx.bw_period_us = default_bw_period_us();
3139 	tg->scx.bw_quota_us = RUNTIME_INF;
3140 	tg->scx.idle = false;
3141 }
3142 
3143 int scx_tg_online(struct task_group *tg)
3144 {
3145 	struct scx_sched *sch = scx_root;
3146 	int ret = 0;
3147 
3148 	WARN_ON_ONCE(tg->scx.flags & (SCX_TG_ONLINE | SCX_TG_INITED));
3149 
3150 	if (scx_cgroup_enabled) {
3151 		if (SCX_HAS_OP(sch, cgroup_init)) {
3152 			struct scx_cgroup_init_args args =
3153 				{ .weight = tg->scx.weight,
3154 				  .bw_period_us = tg->scx.bw_period_us,
3155 				  .bw_quota_us = tg->scx.bw_quota_us,
3156 				  .bw_burst_us = tg->scx.bw_burst_us };
3157 
3158 			ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init,
3159 					      NULL, tg->css.cgroup, &args);
3160 			if (ret)
3161 				ret = ops_sanitize_err(sch, "cgroup_init", ret);
3162 		}
3163 		if (ret == 0)
3164 			tg->scx.flags |= SCX_TG_ONLINE | SCX_TG_INITED;
3165 	} else {
3166 		tg->scx.flags |= SCX_TG_ONLINE;
3167 	}
3168 
3169 	return ret;
3170 }
3171 
3172 void scx_tg_offline(struct task_group *tg)
3173 {
3174 	struct scx_sched *sch = scx_root;
3175 
3176 	WARN_ON_ONCE(!(tg->scx.flags & SCX_TG_ONLINE));
3177 
3178 	if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_exit) &&
3179 	    (tg->scx.flags & SCX_TG_INITED))
3180 		SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL,
3181 			    tg->css.cgroup);
3182 	tg->scx.flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
3183 }
3184 
3185 int scx_cgroup_can_attach(struct cgroup_taskset *tset)
3186 {
3187 	struct scx_sched *sch = scx_root;
3188 	struct cgroup_subsys_state *css;
3189 	struct task_struct *p;
3190 	int ret;
3191 
3192 	if (!scx_cgroup_enabled)
3193 		return 0;
3194 
3195 	cgroup_taskset_for_each(p, css, tset) {
3196 		struct cgroup *from = tg_cgrp(task_group(p));
3197 		struct cgroup *to = tg_cgrp(css_tg(css));
3198 
3199 		WARN_ON_ONCE(p->scx.cgrp_moving_from);
3200 
3201 		/*
3202 		 * sched_move_task() omits identity migrations. Let's match the
3203 		 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move()
3204 		 * always match one-to-one.
3205 		 */
3206 		if (from == to)
3207 			continue;
3208 
3209 		if (SCX_HAS_OP(sch, cgroup_prep_move)) {
3210 			ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED,
3211 					      cgroup_prep_move, NULL,
3212 					      p, from, css->cgroup);
3213 			if (ret)
3214 				goto err;
3215 		}
3216 
3217 		p->scx.cgrp_moving_from = from;
3218 	}
3219 
3220 	return 0;
3221 
3222 err:
3223 	cgroup_taskset_for_each(p, css, tset) {
3224 		if (SCX_HAS_OP(sch, cgroup_cancel_move) &&
3225 		    p->scx.cgrp_moving_from)
3226 			SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL,
3227 				    p, p->scx.cgrp_moving_from, css->cgroup);
3228 		p->scx.cgrp_moving_from = NULL;
3229 	}
3230 
3231 	return ops_sanitize_err(sch, "cgroup_prep_move", ret);
3232 }
3233 
3234 void scx_cgroup_move_task(struct task_struct *p)
3235 {
3236 	struct scx_sched *sch = scx_root;
3237 
3238 	if (!scx_cgroup_enabled)
3239 		return;
3240 
3241 	/*
3242 	 * @p must have ops.cgroup_prep_move() called on it and thus
3243 	 * cgrp_moving_from set.
3244 	 */
3245 	if (SCX_HAS_OP(sch, cgroup_move) &&
3246 	    !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
3247 		SCX_CALL_OP_TASK(sch, SCX_KF_UNLOCKED, cgroup_move, NULL,
3248 				 p, p->scx.cgrp_moving_from,
3249 				 tg_cgrp(task_group(p)));
3250 	p->scx.cgrp_moving_from = NULL;
3251 }
3252 
3253 void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
3254 {
3255 	struct scx_sched *sch = scx_root;
3256 	struct cgroup_subsys_state *css;
3257 	struct task_struct *p;
3258 
3259 	if (!scx_cgroup_enabled)
3260 		return;
3261 
3262 	cgroup_taskset_for_each(p, css, tset) {
3263 		if (SCX_HAS_OP(sch, cgroup_cancel_move) &&
3264 		    p->scx.cgrp_moving_from)
3265 			SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL,
3266 				    p, p->scx.cgrp_moving_from, css->cgroup);
3267 		p->scx.cgrp_moving_from = NULL;
3268 	}
3269 }
3270 
3271 void scx_group_set_weight(struct task_group *tg, unsigned long weight)
3272 {
3273 	struct scx_sched *sch = scx_root;
3274 
3275 	percpu_down_read(&scx_cgroup_ops_rwsem);
3276 
3277 	if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_weight) &&
3278 	    tg->scx.weight != weight)
3279 		SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_weight, NULL,
3280 			    tg_cgrp(tg), weight);
3281 
3282 	tg->scx.weight = weight;
3283 
3284 	percpu_up_read(&scx_cgroup_ops_rwsem);
3285 }
3286 
3287 void scx_group_set_idle(struct task_group *tg, bool idle)
3288 {
3289 	struct scx_sched *sch = scx_root;
3290 
3291 	percpu_down_read(&scx_cgroup_ops_rwsem);
3292 
3293 	if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_idle))
3294 		SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_idle, NULL,
3295 			    tg_cgrp(tg), idle);
3296 
3297 	/* Update the task group's idle state */
3298 	tg->scx.idle = idle;
3299 
3300 	percpu_up_read(&scx_cgroup_ops_rwsem);
3301 }
3302 
3303 void scx_group_set_bandwidth(struct task_group *tg,
3304 			     u64 period_us, u64 quota_us, u64 burst_us)
3305 {
3306 	struct scx_sched *sch = scx_root;
3307 
3308 	percpu_down_read(&scx_cgroup_ops_rwsem);
3309 
3310 	if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_bandwidth) &&
3311 	    (tg->scx.bw_period_us != period_us ||
3312 	     tg->scx.bw_quota_us != quota_us ||
3313 	     tg->scx.bw_burst_us != burst_us))
3314 		SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_bandwidth, NULL,
3315 			    tg_cgrp(tg), period_us, quota_us, burst_us);
3316 
3317 	tg->scx.bw_period_us = period_us;
3318 	tg->scx.bw_quota_us = quota_us;
3319 	tg->scx.bw_burst_us = burst_us;
3320 
3321 	percpu_up_read(&scx_cgroup_ops_rwsem);
3322 }
3323 
3324 static void scx_cgroup_lock(void)
3325 {
3326 	percpu_down_write(&scx_cgroup_ops_rwsem);
3327 	cgroup_lock();
3328 }
3329 
3330 static void scx_cgroup_unlock(void)
3331 {
3332 	cgroup_unlock();
3333 	percpu_up_write(&scx_cgroup_ops_rwsem);
3334 }
3335 
3336 #else	/* CONFIG_EXT_GROUP_SCHED */
3337 
3338 static void scx_cgroup_lock(void) {}
3339 static void scx_cgroup_unlock(void) {}
3340 
3341 #endif	/* CONFIG_EXT_GROUP_SCHED */
3342 
3343 /*
3344  * Omitted operations:
3345  *
3346  * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task
3347  *   isn't tied to the CPU at that point. Preemption is implemented by resetting
3348  *   the victim task's slice to 0 and triggering reschedule on the target CPU.
3349  *
3350  * - migrate_task_rq: Unnecessary as task to cpu mapping is transient.
3351  *
3352  * - task_fork/dead: We need fork/dead notifications for all tasks regardless of
3353  *   their current sched_class. Call them directly from sched core instead.
3354  */
3355 DEFINE_SCHED_CLASS(ext) = {
3356 	.queue_mask		= 1,
3357 
3358 	.enqueue_task		= enqueue_task_scx,
3359 	.dequeue_task		= dequeue_task_scx,
3360 	.yield_task		= yield_task_scx,
3361 	.yield_to_task		= yield_to_task_scx,
3362 
3363 	.wakeup_preempt		= wakeup_preempt_scx,
3364 
3365 	.pick_task		= pick_task_scx,
3366 
3367 	.put_prev_task		= put_prev_task_scx,
3368 	.set_next_task		= set_next_task_scx,
3369 
3370 	.select_task_rq		= select_task_rq_scx,
3371 	.task_woken		= task_woken_scx,
3372 	.set_cpus_allowed	= set_cpus_allowed_scx,
3373 
3374 	.rq_online		= rq_online_scx,
3375 	.rq_offline		= rq_offline_scx,
3376 
3377 	.task_tick		= task_tick_scx,
3378 
3379 	.switching_to		= switching_to_scx,
3380 	.switched_from		= switched_from_scx,
3381 	.switched_to		= switched_to_scx,
3382 	.reweight_task		= reweight_task_scx,
3383 	.prio_changed		= prio_changed_scx,
3384 
3385 	.update_curr		= update_curr_scx,
3386 
3387 #ifdef CONFIG_UCLAMP_TASK
3388 	.uclamp_enabled		= 1,
3389 #endif
3390 };
3391 
3392 static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id)
3393 {
3394 	memset(dsq, 0, sizeof(*dsq));
3395 
3396 	raw_spin_lock_init(&dsq->lock);
3397 	INIT_LIST_HEAD(&dsq->list);
3398 	dsq->id = dsq_id;
3399 }
3400 
3401 static void free_dsq_irq_workfn(struct irq_work *irq_work)
3402 {
3403 	struct llist_node *to_free = llist_del_all(&dsqs_to_free);
3404 	struct scx_dispatch_q *dsq, *tmp_dsq;
3405 
3406 	llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
3407 		kfree_rcu(dsq, rcu);
3408 }
3409 
3410 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);
3411 
3412 static void destroy_dsq(struct scx_sched *sch, u64 dsq_id)
3413 {
3414 	struct scx_dispatch_q *dsq;
3415 	unsigned long flags;
3416 
3417 	rcu_read_lock();
3418 
3419 	dsq = find_user_dsq(sch, dsq_id);
3420 	if (!dsq)
3421 		goto out_unlock_rcu;
3422 
3423 	raw_spin_lock_irqsave(&dsq->lock, flags);
3424 
3425 	if (dsq->nr) {
3426 		scx_error(sch, "attempting to destroy in-use dsq 0x%016llx (nr=%u)",
3427 			  dsq->id, dsq->nr);
3428 		goto out_unlock_dsq;
3429 	}
3430 
3431 	if (rhashtable_remove_fast(&sch->dsq_hash, &dsq->hash_node,
3432 				   dsq_hash_params))
3433 		goto out_unlock_dsq;
3434 
3435 	/*
3436 	 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from
3437 	 * queueing more tasks. As this function can be called from anywhere,
3438 	 * freeing is bounced through an irq work to avoid nesting RCU
3439 	 * operations inside scheduler locks.
3440 	 */
3441 	dsq->id = SCX_DSQ_INVALID;
3442 	llist_add(&dsq->free_node, &dsqs_to_free);
3443 	irq_work_queue(&free_dsq_irq_work);
3444 
3445 out_unlock_dsq:
3446 	raw_spin_unlock_irqrestore(&dsq->lock, flags);
3447 out_unlock_rcu:
3448 	rcu_read_unlock();
3449 }
3450 
3451 #ifdef CONFIG_EXT_GROUP_SCHED
3452 static void scx_cgroup_exit(struct scx_sched *sch)
3453 {
3454 	struct cgroup_subsys_state *css;
3455 
3456 	scx_cgroup_enabled = false;
3457 
3458 	/*
3459 	 * scx_tg_on/offline() are excluded through cgroup_lock(). If we walk
3460 	 * cgroups and exit all the inited ones, all online cgroups are exited.
3461 	 */
3462 	css_for_each_descendant_post(css, &root_task_group.css) {
3463 		struct task_group *tg = css_tg(css);
3464 
3465 		if (!(tg->scx.flags & SCX_TG_INITED))
3466 			continue;
3467 		tg->scx.flags &= ~SCX_TG_INITED;
3468 
3469 		if (!sch->ops.cgroup_exit)
3470 			continue;
3471 
3472 		SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL,
3473 			    css->cgroup);
3474 	}
3475 }
3476 
3477 static int scx_cgroup_init(struct scx_sched *sch)
3478 {
3479 	struct cgroup_subsys_state *css;
3480 	int ret;
3481 
3482 	/*
3483 	 * scx_tg_on/offline() are excluded through cgroup_lock(). If we walk
3484 	 * cgroups and init, all online cgroups are initialized.
3485 	 */
3486 	css_for_each_descendant_pre(css, &root_task_group.css) {
3487 		struct task_group *tg = css_tg(css);
3488 		struct scx_cgroup_init_args args = {
3489 			.weight = tg->scx.weight,
3490 			.bw_period_us = tg->scx.bw_period_us,
3491 			.bw_quota_us = tg->scx.bw_quota_us,
3492 			.bw_burst_us = tg->scx.bw_burst_us,
3493 		};
3494 
3495 		if ((tg->scx.flags &
3496 		     (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
3497 			continue;
3498 
3499 		if (!sch->ops.cgroup_init) {
3500 			tg->scx.flags |= SCX_TG_INITED;
3501 			continue;
3502 		}
3503 
3504 		ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init, NULL,
3505 				      css->cgroup, &args);
3506 		if (ret) {
3507 			css_put(css);
3508 			scx_error(sch, "ops.cgroup_init() failed (%d)", ret);
3509 			return ret;
3510 		}
3511 		tg->scx.flags |= SCX_TG_INITED;
3512 	}
3513 
3514 	WARN_ON_ONCE(scx_cgroup_enabled);
3515 	scx_cgroup_enabled = true;
3516 
3517 	return 0;
3518 }
3519 
3520 #else
3521 static void scx_cgroup_exit(struct scx_sched *sch) {}
3522 static int scx_cgroup_init(struct scx_sched *sch) { return 0; }
3523 #endif
3524 
3525 
3526 /********************************************************************************
3527  * Sysfs interface and ops enable/disable.
3528  */
3529 
3530 #define SCX_ATTR(_name)								\
3531 	static struct kobj_attribute scx_attr_##_name = {			\
3532 		.attr = { .name = __stringify(_name), .mode = 0444 },		\
3533 		.show = scx_attr_##_name##_show,				\
3534 	}
3535 
3536 static ssize_t scx_attr_state_show(struct kobject *kobj,
3537 				   struct kobj_attribute *ka, char *buf)
3538 {
3539 	return sysfs_emit(buf, "%s\n", scx_enable_state_str[scx_enable_state()]);
3540 }
3541 SCX_ATTR(state);
3542 
3543 static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
3544 					struct kobj_attribute *ka, char *buf)
3545 {
3546 	return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all));
3547 }
3548 SCX_ATTR(switch_all);
3549 
3550 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
3551 					 struct kobj_attribute *ka, char *buf)
3552 {
3553 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
3554 }
3555 SCX_ATTR(nr_rejected);
3556 
3557 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
3558 					 struct kobj_attribute *ka, char *buf)
3559 {
3560 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
3561 }
3562 SCX_ATTR(hotplug_seq);
3563 
3564 static ssize_t scx_attr_enable_seq_show(struct kobject *kobj,
3565 					struct kobj_attribute *ka, char *buf)
3566 {
3567 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq));
3568 }
3569 SCX_ATTR(enable_seq);
3570 
3571 static struct attribute *scx_global_attrs[] = {
3572 	&scx_attr_state.attr,
3573 	&scx_attr_switch_all.attr,
3574 	&scx_attr_nr_rejected.attr,
3575 	&scx_attr_hotplug_seq.attr,
3576 	&scx_attr_enable_seq.attr,
3577 	NULL,
3578 };
3579 
3580 static const struct attribute_group scx_global_attr_group = {
3581 	.attrs = scx_global_attrs,
3582 };
3583 
3584 static void free_exit_info(struct scx_exit_info *ei);
3585 
3586 static void scx_sched_free_rcu_work(struct work_struct *work)
3587 {
3588 	struct rcu_work *rcu_work = to_rcu_work(work);
3589 	struct scx_sched *sch = container_of(rcu_work, struct scx_sched, rcu_work);
3590 	struct rhashtable_iter rht_iter;
3591 	struct scx_dispatch_q *dsq;
3592 	int node;
3593 
3594 	irq_work_sync(&sch->error_irq_work);
3595 	kthread_destroy_worker(sch->helper);
3596 
3597 	free_percpu(sch->pcpu);
3598 
3599 	for_each_node_state(node, N_POSSIBLE)
3600 		kfree(sch->global_dsqs[node]);
3601 	kfree(sch->global_dsqs);
3602 
3603 	rhashtable_walk_enter(&sch->dsq_hash, &rht_iter);
3604 	do {
3605 		rhashtable_walk_start(&rht_iter);
3606 
3607 		while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq))
3608 			destroy_dsq(sch, dsq->id);
3609 
3610 		rhashtable_walk_stop(&rht_iter);
3611 	} while (dsq == ERR_PTR(-EAGAIN));
3612 	rhashtable_walk_exit(&rht_iter);
3613 
3614 	rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL);
3615 	free_exit_info(sch->exit_info);
3616 	kfree(sch);
3617 }
3618 
3619 static void scx_kobj_release(struct kobject *kobj)
3620 {
3621 	struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
3622 
3623 	INIT_RCU_WORK(&sch->rcu_work, scx_sched_free_rcu_work);
3624 	queue_rcu_work(system_unbound_wq, &sch->rcu_work);
3625 }
3626 
3627 static ssize_t scx_attr_ops_show(struct kobject *kobj,
3628 				 struct kobj_attribute *ka, char *buf)
3629 {
3630 	return sysfs_emit(buf, "%s\n", scx_root->ops.name);
3631 }
3632 SCX_ATTR(ops);
3633 
3634 #define scx_attr_event_show(buf, at, events, kind) ({				\
3635 	sysfs_emit_at(buf, at, "%s %llu\n", #kind, (events)->kind);		\
3636 })
3637 
3638 static ssize_t scx_attr_events_show(struct kobject *kobj,
3639 				    struct kobj_attribute *ka, char *buf)
3640 {
3641 	struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
3642 	struct scx_event_stats events;
3643 	int at = 0;
3644 
3645 	scx_read_events(sch, &events);
3646 	at += scx_attr_event_show(buf, at, &events, SCX_EV_SELECT_CPU_FALLBACK);
3647 	at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
3648 	at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_KEEP_LAST);
3649 	at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_EXITING);
3650 	at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
3651 	at += scx_attr_event_show(buf, at, &events, SCX_EV_REFILL_SLICE_DFL);
3652 	at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DURATION);
3653 	at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DISPATCH);
3654 	at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_ACTIVATE);
3655 	return at;
3656 }
3657 SCX_ATTR(events);
3658 
3659 static struct attribute *scx_sched_attrs[] = {
3660 	&scx_attr_ops.attr,
3661 	&scx_attr_events.attr,
3662 	NULL,
3663 };
3664 ATTRIBUTE_GROUPS(scx_sched);
3665 
3666 static const struct kobj_type scx_ktype = {
3667 	.release = scx_kobj_release,
3668 	.sysfs_ops = &kobj_sysfs_ops,
3669 	.default_groups = scx_sched_groups,
3670 };
3671 
3672 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
3673 {
3674 	return add_uevent_var(env, "SCXOPS=%s", scx_root->ops.name);
3675 }
3676 
3677 static const struct kset_uevent_ops scx_uevent_ops = {
3678 	.uevent = scx_uevent,
3679 };
3680 
3681 /*
3682  * Used by sched_fork() and __setscheduler_prio() to pick the matching
3683  * sched_class. dl/rt are already handled.
3684  */
3685 bool task_should_scx(int policy)
3686 {
3687 	if (!scx_enabled() || unlikely(scx_enable_state() == SCX_DISABLING))
3688 		return false;
3689 	if (READ_ONCE(scx_switching_all))
3690 		return true;
3691 	return policy == SCHED_EXT;
3692 }
3693 
3694 bool scx_allow_ttwu_queue(const struct task_struct *p)
3695 {
3696 	struct scx_sched *sch;
3697 
3698 	if (!scx_enabled())
3699 		return true;
3700 
3701 	sch = rcu_dereference_sched(scx_root);
3702 	if (unlikely(!sch))
3703 		return true;
3704 
3705 	if (sch->ops.flags & SCX_OPS_ALLOW_QUEUED_WAKEUP)
3706 		return true;
3707 
3708 	if (unlikely(p->sched_class != &ext_sched_class))
3709 		return true;
3710 
3711 	return false;
3712 }
3713 
3714 /**
3715  * handle_lockup - sched_ext common lockup handler
3716  * @fmt: format string
3717  *
3718  * Called on system stall or lockup condition and initiates abort of sched_ext
3719  * if enabled, which may resolve the reported lockup.
3720  *
3721  * Returns %true if sched_ext is enabled and abort was initiated, which may
3722  * resolve the lockup. %false if sched_ext is not enabled or abort was already
3723  * initiated by someone else.
3724  */
3725 static __printf(1, 2) bool handle_lockup(const char *fmt, ...)
3726 {
3727 	struct scx_sched *sch;
3728 	va_list args;
3729 	bool ret;
3730 
3731 	guard(rcu)();
3732 
3733 	sch = rcu_dereference(scx_root);
3734 	if (unlikely(!sch))
3735 		return false;
3736 
3737 	switch (scx_enable_state()) {
3738 	case SCX_ENABLING:
3739 	case SCX_ENABLED:
3740 		va_start(args, fmt);
3741 		ret = scx_verror(sch, fmt, args);
3742 		va_end(args);
3743 		return ret;
3744 	default:
3745 		return false;
3746 	}
3747 }
3748 
3749 /**
3750  * scx_rcu_cpu_stall - sched_ext RCU CPU stall handler
3751  *
3752  * While there are various reasons why RCU CPU stalls can occur on a system
3753  * that may not be caused by the current BPF scheduler, try kicking out the
3754  * current scheduler in an attempt to recover the system to a good state before
3755  * issuing panics.
3756  *
3757  * Returns %true if sched_ext is enabled and abort was initiated, which may
3758  * resolve the reported RCU stall. %false if sched_ext is not enabled or someone
3759  * else already initiated abort.
3760  */
3761 bool scx_rcu_cpu_stall(void)
3762 {
3763 	return handle_lockup("RCU CPU stall detected!");
3764 }
3765 
3766 /**
3767  * scx_softlockup - sched_ext softlockup handler
3768  * @dur_s: number of seconds of CPU stuck due to soft lockup
3769  *
3770  * On some multi-socket setups (e.g. 2x Intel 8480c), the BPF scheduler can
3771  * live-lock the system by making many CPUs target the same DSQ to the point
3772  * where soft-lockup detection triggers. This function is called from
3773  * soft-lockup watchdog when the triggering point is close and tries to unjam
3774  * the system and aborting the BPF scheduler.
3775  */
3776 void scx_softlockup(u32 dur_s)
3777 {
3778 	if (!handle_lockup("soft lockup - CPU %d stuck for %us", smp_processor_id(), dur_s))
3779 		return;
3780 
3781 	printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU %d stuck for %us, disabling BPF scheduler\n",
3782 			smp_processor_id(), dur_s);
3783 }
3784 
3785 /**
3786  * scx_hardlockup - sched_ext hardlockup handler
3787  *
3788  * A poorly behaving BPF scheduler can trigger hard lockup by e.g. putting
3789  * numerous affinitized tasks in a single queue and directing all CPUs at it.
3790  * Try kicking out the current scheduler in an attempt to recover the system to
3791  * a good state before taking more drastic actions.
3792  *
3793  * Returns %true if sched_ext is enabled and abort was initiated, which may
3794  * resolve the reported hardlockdup. %false if sched_ext is not enabled or
3795  * someone else already initiated abort.
3796  */
3797 bool scx_hardlockup(int cpu)
3798 {
3799 	if (!handle_lockup("hard lockup - CPU %d", cpu))
3800 		return false;
3801 
3802 	printk_deferred(KERN_ERR "sched_ext: Hard lockup - CPU %d, disabling BPF scheduler\n",
3803 			cpu);
3804 	return true;
3805 }
3806 
3807 static u32 bypass_lb_cpu(struct scx_sched *sch, struct rq *rq,
3808 			 struct cpumask *donee_mask, struct cpumask *resched_mask,
3809 			 u32 nr_donor_target, u32 nr_donee_target)
3810 {
3811 	struct scx_dispatch_q *donor_dsq = &rq->scx.bypass_dsq;
3812 	struct task_struct *p, *n;
3813 	struct scx_dsq_list_node cursor = INIT_DSQ_LIST_CURSOR(cursor, 0, 0);
3814 	s32 delta = READ_ONCE(donor_dsq->nr) - nr_donor_target;
3815 	u32 nr_balanced = 0, min_delta_us;
3816 
3817 	/*
3818 	 * All we want to guarantee is reasonable forward progress. No reason to
3819 	 * fine tune. Assuming every task on @donor_dsq runs their full slice,
3820 	 * consider offloading iff the total queued duration is over the
3821 	 * threshold.
3822 	 */
3823 	min_delta_us = scx_bypass_lb_intv_us / SCX_BYPASS_LB_MIN_DELTA_DIV;
3824 	if (delta < DIV_ROUND_UP(min_delta_us, scx_slice_bypass_us))
3825 		return 0;
3826 
3827 	raw_spin_rq_lock_irq(rq);
3828 	raw_spin_lock(&donor_dsq->lock);
3829 	list_add(&cursor.node, &donor_dsq->list);
3830 resume:
3831 	n = container_of(&cursor, struct task_struct, scx.dsq_list);
3832 	n = nldsq_next_task(donor_dsq, n, false);
3833 
3834 	while ((p = n)) {
3835 		struct rq *donee_rq;
3836 		struct scx_dispatch_q *donee_dsq;
3837 		int donee;
3838 
3839 		n = nldsq_next_task(donor_dsq, n, false);
3840 
3841 		if (donor_dsq->nr <= nr_donor_target)
3842 			break;
3843 
3844 		if (cpumask_empty(donee_mask))
3845 			break;
3846 
3847 		donee = cpumask_any_and_distribute(donee_mask, p->cpus_ptr);
3848 		if (donee >= nr_cpu_ids)
3849 			continue;
3850 
3851 		donee_rq = cpu_rq(donee);
3852 		donee_dsq = &donee_rq->scx.bypass_dsq;
3853 
3854 		/*
3855 		 * $p's rq is not locked but $p's DSQ lock protects its
3856 		 * scheduling properties making this test safe.
3857 		 */
3858 		if (!task_can_run_on_remote_rq(sch, p, donee_rq, false))
3859 			continue;
3860 
3861 		/*
3862 		 * Moving $p from one non-local DSQ to another. The source rq
3863 		 * and DSQ are already locked. Do an abbreviated dequeue and
3864 		 * then perform enqueue without unlocking $donor_dsq.
3865 		 *
3866 		 * We don't want to drop and reacquire the lock on each
3867 		 * iteration as @donor_dsq can be very long and potentially
3868 		 * highly contended. Donee DSQs are less likely to be contended.
3869 		 * The nested locking is safe as only this LB moves tasks
3870 		 * between bypass DSQs.
3871 		 */
3872 		dispatch_dequeue_locked(p, donor_dsq);
3873 		dispatch_enqueue(sch, donee_dsq, p, SCX_ENQ_NESTED);
3874 
3875 		/*
3876 		 * $donee might have been idle and need to be woken up. No need
3877 		 * to be clever. Kick every CPU that receives tasks.
3878 		 */
3879 		cpumask_set_cpu(donee, resched_mask);
3880 
3881 		if (READ_ONCE(donee_dsq->nr) >= nr_donee_target)
3882 			cpumask_clear_cpu(donee, donee_mask);
3883 
3884 		nr_balanced++;
3885 		if (!(nr_balanced % SCX_BYPASS_LB_BATCH) && n) {
3886 			list_move_tail(&cursor.node, &n->scx.dsq_list.node);
3887 			raw_spin_unlock(&donor_dsq->lock);
3888 			raw_spin_rq_unlock_irq(rq);
3889 			cpu_relax();
3890 			raw_spin_rq_lock_irq(rq);
3891 			raw_spin_lock(&donor_dsq->lock);
3892 			goto resume;
3893 		}
3894 	}
3895 
3896 	list_del_init(&cursor.node);
3897 	raw_spin_unlock(&donor_dsq->lock);
3898 	raw_spin_rq_unlock_irq(rq);
3899 
3900 	return nr_balanced;
3901 }
3902 
3903 static void bypass_lb_node(struct scx_sched *sch, int node)
3904 {
3905 	const struct cpumask *node_mask = cpumask_of_node(node);
3906 	struct cpumask *donee_mask = scx_bypass_lb_donee_cpumask;
3907 	struct cpumask *resched_mask = scx_bypass_lb_resched_cpumask;
3908 	u32 nr_tasks = 0, nr_cpus = 0, nr_balanced = 0;
3909 	u32 nr_target, nr_donor_target;
3910 	u32 before_min = U32_MAX, before_max = 0;
3911 	u32 after_min = U32_MAX, after_max = 0;
3912 	int cpu;
3913 
3914 	/* count the target tasks and CPUs */
3915 	for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
3916 		u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr);
3917 
3918 		nr_tasks += nr;
3919 		nr_cpus++;
3920 
3921 		before_min = min(nr, before_min);
3922 		before_max = max(nr, before_max);
3923 	}
3924 
3925 	if (!nr_cpus)
3926 		return;
3927 
3928 	/*
3929 	 * We don't want CPUs to have more than $nr_donor_target tasks and
3930 	 * balancing to fill donee CPUs upto $nr_target. Once targets are
3931 	 * calculated, find the donee CPUs.
3932 	 */
3933 	nr_target = DIV_ROUND_UP(nr_tasks, nr_cpus);
3934 	nr_donor_target = DIV_ROUND_UP(nr_target * SCX_BYPASS_LB_DONOR_PCT, 100);
3935 
3936 	cpumask_clear(donee_mask);
3937 	for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
3938 		if (READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr) < nr_target)
3939 			cpumask_set_cpu(cpu, donee_mask);
3940 	}
3941 
3942 	/* iterate !donee CPUs and see if they should be offloaded */
3943 	cpumask_clear(resched_mask);
3944 	for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
3945 		struct rq *rq = cpu_rq(cpu);
3946 		struct scx_dispatch_q *donor_dsq = &rq->scx.bypass_dsq;
3947 
3948 		if (cpumask_empty(donee_mask))
3949 			break;
3950 		if (cpumask_test_cpu(cpu, donee_mask))
3951 			continue;
3952 		if (READ_ONCE(donor_dsq->nr) <= nr_donor_target)
3953 			continue;
3954 
3955 		nr_balanced += bypass_lb_cpu(sch, rq, donee_mask, resched_mask,
3956 					     nr_donor_target, nr_target);
3957 	}
3958 
3959 	for_each_cpu(cpu, resched_mask) {
3960 		struct rq *rq = cpu_rq(cpu);
3961 
3962 		raw_spin_rq_lock_irq(rq);
3963 		resched_curr(rq);
3964 		raw_spin_rq_unlock_irq(rq);
3965 	}
3966 
3967 	for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
3968 		u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr);
3969 
3970 		after_min = min(nr, after_min);
3971 		after_max = max(nr, after_max);
3972 
3973 	}
3974 
3975 	trace_sched_ext_bypass_lb(node, nr_cpus, nr_tasks, nr_balanced,
3976 				  before_min, before_max, after_min, after_max);
3977 }
3978 
3979 /*
3980  * In bypass mode, all tasks are put on the per-CPU bypass DSQs. If the machine
3981  * is over-saturated and the BPF scheduler skewed tasks into few CPUs, some
3982  * bypass DSQs can be overloaded. If there are enough tasks to saturate other
3983  * lightly loaded CPUs, such imbalance can lead to very high execution latency
3984  * on the overloaded CPUs and thus to hung tasks and RCU stalls. To avoid such
3985  * outcomes, a simple load balancing mechanism is implemented by the following
3986  * timer which runs periodically while bypass mode is in effect.
3987  */
3988 static void scx_bypass_lb_timerfn(struct timer_list *timer)
3989 {
3990 	struct scx_sched *sch;
3991 	int node;
3992 	u32 intv_us;
3993 
3994 	sch = rcu_dereference_all(scx_root);
3995 	if (unlikely(!sch) || !READ_ONCE(scx_bypass_depth))
3996 		return;
3997 
3998 	for_each_node_with_cpus(node)
3999 		bypass_lb_node(sch, node);
4000 
4001 	intv_us = READ_ONCE(scx_bypass_lb_intv_us);
4002 	if (intv_us)
4003 		mod_timer(timer, jiffies + usecs_to_jiffies(intv_us));
4004 }
4005 
4006 static DEFINE_TIMER(scx_bypass_lb_timer, scx_bypass_lb_timerfn);
4007 
4008 /**
4009  * scx_bypass - [Un]bypass scx_ops and guarantee forward progress
4010  * @bypass: true for bypass, false for unbypass
4011  *
4012  * Bypassing guarantees that all runnable tasks make forward progress without
4013  * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
4014  * be held by tasks that the BPF scheduler is forgetting to run, which
4015  * unfortunately also excludes toggling the static branches.
4016  *
4017  * Let's work around by overriding a couple ops and modifying behaviors based on
4018  * the DISABLING state and then cycling the queued tasks through dequeue/enqueue
4019  * to force global FIFO scheduling.
4020  *
4021  * - ops.select_cpu() is ignored and the default select_cpu() is used.
4022  *
4023  * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
4024  *   %SCX_OPS_ENQ_LAST is also ignored.
4025  *
4026  * - ops.dispatch() is ignored.
4027  *
4028  * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
4029  *   can't be trusted. Whenever a tick triggers, the running task is rotated to
4030  *   the tail of the queue with core_sched_at touched.
4031  *
4032  * - pick_next_task() suppresses zero slice warning.
4033  *
4034  * - scx_kick_cpu() is disabled to avoid irq_work malfunction during PM
4035  *   operations.
4036  *
4037  * - scx_prio_less() reverts to the default core_sched_at order.
4038  */
4039 static void scx_bypass(bool bypass)
4040 {
4041 	static DEFINE_RAW_SPINLOCK(bypass_lock);
4042 	static unsigned long bypass_timestamp;
4043 	struct scx_sched *sch;
4044 	unsigned long flags;
4045 	int cpu;
4046 
4047 	raw_spin_lock_irqsave(&bypass_lock, flags);
4048 	sch = rcu_dereference_bh(scx_root);
4049 
4050 	if (bypass) {
4051 		u32 intv_us;
4052 
4053 		WRITE_ONCE(scx_bypass_depth, scx_bypass_depth + 1);
4054 		WARN_ON_ONCE(scx_bypass_depth <= 0);
4055 		if (scx_bypass_depth != 1)
4056 			goto unlock;
4057 		WRITE_ONCE(scx_slice_dfl, scx_slice_bypass_us * NSEC_PER_USEC);
4058 		bypass_timestamp = ktime_get_ns();
4059 		if (sch)
4060 			scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1);
4061 
4062 		intv_us = READ_ONCE(scx_bypass_lb_intv_us);
4063 		if (intv_us && !timer_pending(&scx_bypass_lb_timer)) {
4064 			scx_bypass_lb_timer.expires =
4065 				jiffies + usecs_to_jiffies(intv_us);
4066 			add_timer_global(&scx_bypass_lb_timer);
4067 		}
4068 	} else {
4069 		WRITE_ONCE(scx_bypass_depth, scx_bypass_depth - 1);
4070 		WARN_ON_ONCE(scx_bypass_depth < 0);
4071 		if (scx_bypass_depth != 0)
4072 			goto unlock;
4073 		WRITE_ONCE(scx_slice_dfl, SCX_SLICE_DFL);
4074 		if (sch)
4075 			scx_add_event(sch, SCX_EV_BYPASS_DURATION,
4076 				      ktime_get_ns() - bypass_timestamp);
4077 	}
4078 
4079 	/*
4080 	 * No task property is changing. We just need to make sure all currently
4081 	 * queued tasks are re-queued according to the new scx_rq_bypassing()
4082 	 * state. As an optimization, walk each rq's runnable_list instead of
4083 	 * the scx_tasks list.
4084 	 *
4085 	 * This function can't trust the scheduler and thus can't use
4086 	 * cpus_read_lock(). Walk all possible CPUs instead of online.
4087 	 */
4088 	for_each_possible_cpu(cpu) {
4089 		struct rq *rq = cpu_rq(cpu);
4090 		struct task_struct *p, *n;
4091 
4092 		raw_spin_rq_lock(rq);
4093 
4094 		if (bypass) {
4095 			WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
4096 			rq->scx.flags |= SCX_RQ_BYPASSING;
4097 		} else {
4098 			WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING));
4099 			rq->scx.flags &= ~SCX_RQ_BYPASSING;
4100 		}
4101 
4102 		/*
4103 		 * We need to guarantee that no tasks are on the BPF scheduler
4104 		 * while bypassing. Either we see enabled or the enable path
4105 		 * sees scx_rq_bypassing() before moving tasks to SCX.
4106 		 */
4107 		if (!scx_enabled()) {
4108 			raw_spin_rq_unlock(rq);
4109 			continue;
4110 		}
4111 
4112 		/*
4113 		 * The use of list_for_each_entry_safe_reverse() is required
4114 		 * because each task is going to be removed from and added back
4115 		 * to the runnable_list during iteration. Because they're added
4116 		 * to the tail of the list, safe reverse iteration can still
4117 		 * visit all nodes.
4118 		 */
4119 		list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
4120 						 scx.runnable_node) {
4121 			/* cycling deq/enq is enough, see the function comment */
4122 			scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_MOVE) {
4123 				/* nothing */ ;
4124 			}
4125 		}
4126 
4127 		/* resched to restore ticks and idle state */
4128 		if (cpu_online(cpu) || cpu == smp_processor_id())
4129 			resched_curr(rq);
4130 
4131 		raw_spin_rq_unlock(rq);
4132 	}
4133 
4134 unlock:
4135 	raw_spin_unlock_irqrestore(&bypass_lock, flags);
4136 }
4137 
4138 static void free_exit_info(struct scx_exit_info *ei)
4139 {
4140 	kvfree(ei->dump);
4141 	kfree(ei->msg);
4142 	kfree(ei->bt);
4143 	kfree(ei);
4144 }
4145 
4146 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
4147 {
4148 	struct scx_exit_info *ei;
4149 
4150 	ei = kzalloc(sizeof(*ei), GFP_KERNEL);
4151 	if (!ei)
4152 		return NULL;
4153 
4154 	ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
4155 	ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
4156 	ei->dump = kvzalloc(exit_dump_len, GFP_KERNEL);
4157 
4158 	if (!ei->bt || !ei->msg || !ei->dump) {
4159 		free_exit_info(ei);
4160 		return NULL;
4161 	}
4162 
4163 	return ei;
4164 }
4165 
4166 static const char *scx_exit_reason(enum scx_exit_kind kind)
4167 {
4168 	switch (kind) {
4169 	case SCX_EXIT_UNREG:
4170 		return "unregistered from user space";
4171 	case SCX_EXIT_UNREG_BPF:
4172 		return "unregistered from BPF";
4173 	case SCX_EXIT_UNREG_KERN:
4174 		return "unregistered from the main kernel";
4175 	case SCX_EXIT_SYSRQ:
4176 		return "disabled by sysrq-S";
4177 	case SCX_EXIT_ERROR:
4178 		return "runtime error";
4179 	case SCX_EXIT_ERROR_BPF:
4180 		return "scx_bpf_error";
4181 	case SCX_EXIT_ERROR_STALL:
4182 		return "runnable task stall";
4183 	default:
4184 		return "<UNKNOWN>";
4185 	}
4186 }
4187 
4188 static void free_kick_syncs(void)
4189 {
4190 	int cpu;
4191 
4192 	for_each_possible_cpu(cpu) {
4193 		struct scx_kick_syncs **ksyncs = per_cpu_ptr(&scx_kick_syncs, cpu);
4194 		struct scx_kick_syncs *to_free;
4195 
4196 		to_free = rcu_replace_pointer(*ksyncs, NULL, true);
4197 		if (to_free)
4198 			kvfree_rcu(to_free, rcu);
4199 	}
4200 }
4201 
4202 static void scx_disable_workfn(struct kthread_work *work)
4203 {
4204 	struct scx_sched *sch = container_of(work, struct scx_sched, disable_work);
4205 	struct scx_exit_info *ei = sch->exit_info;
4206 	struct scx_task_iter sti;
4207 	struct task_struct *p;
4208 	int kind, cpu;
4209 
4210 	kind = atomic_read(&sch->exit_kind);
4211 	while (true) {
4212 		if (kind == SCX_EXIT_DONE)	/* already disabled? */
4213 			return;
4214 		WARN_ON_ONCE(kind == SCX_EXIT_NONE);
4215 		if (atomic_try_cmpxchg(&sch->exit_kind, &kind, SCX_EXIT_DONE))
4216 			break;
4217 	}
4218 	ei->kind = kind;
4219 	ei->reason = scx_exit_reason(ei->kind);
4220 
4221 	/* guarantee forward progress by bypassing scx_ops */
4222 	scx_bypass(true);
4223 	WRITE_ONCE(scx_aborting, false);
4224 
4225 	switch (scx_set_enable_state(SCX_DISABLING)) {
4226 	case SCX_DISABLING:
4227 		WARN_ONCE(true, "sched_ext: duplicate disabling instance?");
4228 		break;
4229 	case SCX_DISABLED:
4230 		pr_warn("sched_ext: ops error detected without ops (%s)\n",
4231 			sch->exit_info->msg);
4232 		WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING);
4233 		goto done;
4234 	default:
4235 		break;
4236 	}
4237 
4238 	/*
4239 	 * Here, every runnable task is guaranteed to make forward progress and
4240 	 * we can safely use blocking synchronization constructs. Actually
4241 	 * disable ops.
4242 	 */
4243 	mutex_lock(&scx_enable_mutex);
4244 
4245 	static_branch_disable(&__scx_switched_all);
4246 	WRITE_ONCE(scx_switching_all, false);
4247 
4248 	/*
4249 	 * Shut down cgroup support before tasks so that the cgroup attach path
4250 	 * doesn't race against scx_exit_task().
4251 	 */
4252 	scx_cgroup_lock();
4253 	scx_cgroup_exit(sch);
4254 	scx_cgroup_unlock();
4255 
4256 	/*
4257 	 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones
4258 	 * must be switched out and exited synchronously.
4259 	 */
4260 	percpu_down_write(&scx_fork_rwsem);
4261 
4262 	scx_init_task_enabled = false;
4263 
4264 	scx_task_iter_start(&sti);
4265 	while ((p = scx_task_iter_next_locked(&sti))) {
4266 		unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
4267 		const struct sched_class *old_class = p->sched_class;
4268 		const struct sched_class *new_class = scx_setscheduler_class(p);
4269 
4270 		update_rq_clock(task_rq(p));
4271 
4272 		if (old_class != new_class)
4273 			queue_flags |= DEQUEUE_CLASS;
4274 
4275 		scoped_guard (sched_change, p, queue_flags) {
4276 			p->sched_class = new_class;
4277 		}
4278 
4279 		scx_exit_task(p);
4280 	}
4281 	scx_task_iter_stop(&sti);
4282 	percpu_up_write(&scx_fork_rwsem);
4283 
4284 	/*
4285 	 * Invalidate all the rq clocks to prevent getting outdated
4286 	 * rq clocks from a previous scx scheduler.
4287 	 */
4288 	for_each_possible_cpu(cpu) {
4289 		struct rq *rq = cpu_rq(cpu);
4290 		scx_rq_clock_invalidate(rq);
4291 	}
4292 
4293 	/* no task is on scx, turn off all the switches and flush in-progress calls */
4294 	static_branch_disable(&__scx_enabled);
4295 	bitmap_zero(sch->has_op, SCX_OPI_END);
4296 	scx_idle_disable();
4297 	synchronize_rcu();
4298 
4299 	if (ei->kind >= SCX_EXIT_ERROR) {
4300 		pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
4301 		       sch->ops.name, ei->reason);
4302 
4303 		if (ei->msg[0] != '\0')
4304 			pr_err("sched_ext: %s: %s\n", sch->ops.name, ei->msg);
4305 #ifdef CONFIG_STACKTRACE
4306 		stack_trace_print(ei->bt, ei->bt_len, 2);
4307 #endif
4308 	} else {
4309 		pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
4310 			sch->ops.name, ei->reason);
4311 	}
4312 
4313 	if (sch->ops.exit)
4314 		SCX_CALL_OP(sch, SCX_KF_UNLOCKED, exit, NULL, ei);
4315 
4316 	cancel_delayed_work_sync(&scx_watchdog_work);
4317 
4318 	/*
4319 	 * scx_root clearing must be inside cpus_read_lock(). See
4320 	 * handle_hotplug().
4321 	 */
4322 	cpus_read_lock();
4323 	RCU_INIT_POINTER(scx_root, NULL);
4324 	cpus_read_unlock();
4325 
4326 	/*
4327 	 * Delete the kobject from the hierarchy synchronously. Otherwise, sysfs
4328 	 * could observe an object of the same name still in the hierarchy when
4329 	 * the next scheduler is loaded.
4330 	 */
4331 	kobject_del(&sch->kobj);
4332 
4333 	free_percpu(scx_dsp_ctx);
4334 	scx_dsp_ctx = NULL;
4335 	scx_dsp_max_batch = 0;
4336 	free_kick_syncs();
4337 
4338 	if (scx_bypassed_for_enable) {
4339 		scx_bypassed_for_enable = false;
4340 		scx_bypass(false);
4341 	}
4342 
4343 	mutex_unlock(&scx_enable_mutex);
4344 
4345 	WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING);
4346 done:
4347 	scx_bypass(false);
4348 }
4349 
4350 static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind)
4351 {
4352 	int none = SCX_EXIT_NONE;
4353 
4354 	if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind))
4355 		return false;
4356 
4357 	/*
4358 	 * Some CPUs may be trapped in the dispatch paths. Set the aborting
4359 	 * flag to break potential live-lock scenarios, ensuring we can
4360 	 * successfully reach scx_bypass().
4361 	 */
4362 	WRITE_ONCE(scx_aborting, true);
4363 	return true;
4364 }
4365 
4366 static void scx_disable(enum scx_exit_kind kind)
4367 {
4368 	struct scx_sched *sch;
4369 
4370 	if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
4371 		kind = SCX_EXIT_ERROR;
4372 
4373 	rcu_read_lock();
4374 	sch = rcu_dereference(scx_root);
4375 	if (sch) {
4376 		scx_claim_exit(sch, kind);
4377 		kthread_queue_work(sch->helper, &sch->disable_work);
4378 	}
4379 	rcu_read_unlock();
4380 }
4381 
4382 static void dump_newline(struct seq_buf *s)
4383 {
4384 	trace_sched_ext_dump("");
4385 
4386 	/* @s may be zero sized and seq_buf triggers WARN if so */
4387 	if (s->size)
4388 		seq_buf_putc(s, '\n');
4389 }
4390 
4391 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
4392 {
4393 	va_list args;
4394 
4395 #ifdef CONFIG_TRACEPOINTS
4396 	if (trace_sched_ext_dump_enabled()) {
4397 		/* protected by scx_dump_state()::dump_lock */
4398 		static char line_buf[SCX_EXIT_MSG_LEN];
4399 
4400 		va_start(args, fmt);
4401 		vscnprintf(line_buf, sizeof(line_buf), fmt, args);
4402 		va_end(args);
4403 
4404 		trace_sched_ext_dump(line_buf);
4405 	}
4406 #endif
4407 	/* @s may be zero sized and seq_buf triggers WARN if so */
4408 	if (s->size) {
4409 		va_start(args, fmt);
4410 		seq_buf_vprintf(s, fmt, args);
4411 		va_end(args);
4412 
4413 		seq_buf_putc(s, '\n');
4414 	}
4415 }
4416 
4417 static void dump_stack_trace(struct seq_buf *s, const char *prefix,
4418 			     const unsigned long *bt, unsigned int len)
4419 {
4420 	unsigned int i;
4421 
4422 	for (i = 0; i < len; i++)
4423 		dump_line(s, "%s%pS", prefix, (void *)bt[i]);
4424 }
4425 
4426 static void ops_dump_init(struct seq_buf *s, const char *prefix)
4427 {
4428 	struct scx_dump_data *dd = &scx_dump_data;
4429 
4430 	lockdep_assert_irqs_disabled();
4431 
4432 	dd->cpu = smp_processor_id();		/* allow scx_bpf_dump() */
4433 	dd->first = true;
4434 	dd->cursor = 0;
4435 	dd->s = s;
4436 	dd->prefix = prefix;
4437 }
4438 
4439 static void ops_dump_flush(void)
4440 {
4441 	struct scx_dump_data *dd = &scx_dump_data;
4442 	char *line = dd->buf.line;
4443 
4444 	if (!dd->cursor)
4445 		return;
4446 
4447 	/*
4448 	 * There's something to flush and this is the first line. Insert a blank
4449 	 * line to distinguish ops dump.
4450 	 */
4451 	if (dd->first) {
4452 		dump_newline(dd->s);
4453 		dd->first = false;
4454 	}
4455 
4456 	/*
4457 	 * There may be multiple lines in $line. Scan and emit each line
4458 	 * separately.
4459 	 */
4460 	while (true) {
4461 		char *end = line;
4462 		char c;
4463 
4464 		while (*end != '\n' && *end != '\0')
4465 			end++;
4466 
4467 		/*
4468 		 * If $line overflowed, it may not have newline at the end.
4469 		 * Always emit with a newline.
4470 		 */
4471 		c = *end;
4472 		*end = '\0';
4473 		dump_line(dd->s, "%s%s", dd->prefix, line);
4474 		if (c == '\0')
4475 			break;
4476 
4477 		/* move to the next line */
4478 		end++;
4479 		if (*end == '\0')
4480 			break;
4481 		line = end;
4482 	}
4483 
4484 	dd->cursor = 0;
4485 }
4486 
4487 static void ops_dump_exit(void)
4488 {
4489 	ops_dump_flush();
4490 	scx_dump_data.cpu = -1;
4491 }
4492 
4493 static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
4494 			  struct task_struct *p, char marker)
4495 {
4496 	static unsigned long bt[SCX_EXIT_BT_LEN];
4497 	struct scx_sched *sch = scx_root;
4498 	char dsq_id_buf[19] = "(n/a)";
4499 	unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
4500 	unsigned int bt_len = 0;
4501 
4502 	if (p->scx.dsq)
4503 		scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx",
4504 			  (unsigned long long)p->scx.dsq->id);
4505 
4506 	dump_newline(s);
4507 	dump_line(s, " %c%c %s[%d] %+ldms",
4508 		  marker, task_state_to_char(p), p->comm, p->pid,
4509 		  jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
4510 	dump_line(s, "      scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
4511 		  scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
4512 		  p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
4513 		  ops_state >> SCX_OPSS_QSEQ_SHIFT);
4514 	dump_line(s, "      sticky/holding_cpu=%d/%d dsq_id=%s",
4515 		  p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf);
4516 	dump_line(s, "      dsq_vtime=%llu slice=%llu weight=%u",
4517 		  p->scx.dsq_vtime, p->scx.slice, p->scx.weight);
4518 	dump_line(s, "      cpus=%*pb no_mig=%u", cpumask_pr_args(p->cpus_ptr),
4519 		  p->migration_disabled);
4520 
4521 	if (SCX_HAS_OP(sch, dump_task)) {
4522 		ops_dump_init(s, "    ");
4523 		SCX_CALL_OP(sch, SCX_KF_REST, dump_task, NULL, dctx, p);
4524 		ops_dump_exit();
4525 	}
4526 
4527 #ifdef CONFIG_STACKTRACE
4528 	bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1);
4529 #endif
4530 	if (bt_len) {
4531 		dump_newline(s);
4532 		dump_stack_trace(s, "    ", bt, bt_len);
4533 	}
4534 }
4535 
4536 static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
4537 {
4538 	static DEFINE_SPINLOCK(dump_lock);
4539 	static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
4540 	struct scx_sched *sch = scx_root;
4541 	struct scx_dump_ctx dctx = {
4542 		.kind = ei->kind,
4543 		.exit_code = ei->exit_code,
4544 		.reason = ei->reason,
4545 		.at_ns = ktime_get_ns(),
4546 		.at_jiffies = jiffies,
4547 	};
4548 	struct seq_buf s;
4549 	struct scx_event_stats events;
4550 	unsigned long flags;
4551 	char *buf;
4552 	int cpu;
4553 
4554 	spin_lock_irqsave(&dump_lock, flags);
4555 
4556 	seq_buf_init(&s, ei->dump, dump_len);
4557 
4558 	if (ei->kind == SCX_EXIT_NONE) {
4559 		dump_line(&s, "Debug dump triggered by %s", ei->reason);
4560 	} else {
4561 		dump_line(&s, "%s[%d] triggered exit kind %d:",
4562 			  current->comm, current->pid, ei->kind);
4563 		dump_line(&s, "  %s (%s)", ei->reason, ei->msg);
4564 		dump_newline(&s);
4565 		dump_line(&s, "Backtrace:");
4566 		dump_stack_trace(&s, "  ", ei->bt, ei->bt_len);
4567 	}
4568 
4569 	if (SCX_HAS_OP(sch, dump)) {
4570 		ops_dump_init(&s, "");
4571 		SCX_CALL_OP(sch, SCX_KF_UNLOCKED, dump, NULL, &dctx);
4572 		ops_dump_exit();
4573 	}
4574 
4575 	dump_newline(&s);
4576 	dump_line(&s, "CPU states");
4577 	dump_line(&s, "----------");
4578 
4579 	for_each_possible_cpu(cpu) {
4580 		struct rq *rq = cpu_rq(cpu);
4581 		struct rq_flags rf;
4582 		struct task_struct *p;
4583 		struct seq_buf ns;
4584 		size_t avail, used;
4585 		bool idle;
4586 
4587 		rq_lock_irqsave(rq, &rf);
4588 
4589 		idle = list_empty(&rq->scx.runnable_list) &&
4590 			rq->curr->sched_class == &idle_sched_class;
4591 
4592 		if (idle && !SCX_HAS_OP(sch, dump_cpu))
4593 			goto next;
4594 
4595 		/*
4596 		 * We don't yet know whether ops.dump_cpu() will produce output
4597 		 * and we may want to skip the default CPU dump if it doesn't.
4598 		 * Use a nested seq_buf to generate the standard dump so that we
4599 		 * can decide whether to commit later.
4600 		 */
4601 		avail = seq_buf_get_buf(&s, &buf);
4602 		seq_buf_init(&ns, buf, avail);
4603 
4604 		dump_newline(&ns);
4605 		dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu ksync=%lu",
4606 			  cpu, rq->scx.nr_running, rq->scx.flags,
4607 			  rq->scx.cpu_released, rq->scx.ops_qseq,
4608 			  rq->scx.kick_sync);
4609 		dump_line(&ns, "          curr=%s[%d] class=%ps",
4610 			  rq->curr->comm, rq->curr->pid,
4611 			  rq->curr->sched_class);
4612 		if (!cpumask_empty(rq->scx.cpus_to_kick))
4613 			dump_line(&ns, "  cpus_to_kick   : %*pb",
4614 				  cpumask_pr_args(rq->scx.cpus_to_kick));
4615 		if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
4616 			dump_line(&ns, "  idle_to_kick   : %*pb",
4617 				  cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
4618 		if (!cpumask_empty(rq->scx.cpus_to_preempt))
4619 			dump_line(&ns, "  cpus_to_preempt: %*pb",
4620 				  cpumask_pr_args(rq->scx.cpus_to_preempt));
4621 		if (!cpumask_empty(rq->scx.cpus_to_wait))
4622 			dump_line(&ns, "  cpus_to_wait   : %*pb",
4623 				  cpumask_pr_args(rq->scx.cpus_to_wait));
4624 
4625 		used = seq_buf_used(&ns);
4626 		if (SCX_HAS_OP(sch, dump_cpu)) {
4627 			ops_dump_init(&ns, "  ");
4628 			SCX_CALL_OP(sch, SCX_KF_REST, dump_cpu, NULL,
4629 				    &dctx, cpu, idle);
4630 			ops_dump_exit();
4631 		}
4632 
4633 		/*
4634 		 * If idle && nothing generated by ops.dump_cpu(), there's
4635 		 * nothing interesting. Skip.
4636 		 */
4637 		if (idle && used == seq_buf_used(&ns))
4638 			goto next;
4639 
4640 		/*
4641 		 * $s may already have overflowed when $ns was created. If so,
4642 		 * calling commit on it will trigger BUG.
4643 		 */
4644 		if (avail) {
4645 			seq_buf_commit(&s, seq_buf_used(&ns));
4646 			if (seq_buf_has_overflowed(&ns))
4647 				seq_buf_set_overflow(&s);
4648 		}
4649 
4650 		if (rq->curr->sched_class == &ext_sched_class)
4651 			scx_dump_task(&s, &dctx, rq->curr, '*');
4652 
4653 		list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
4654 			scx_dump_task(&s, &dctx, p, ' ');
4655 	next:
4656 		rq_unlock_irqrestore(rq, &rf);
4657 	}
4658 
4659 	dump_newline(&s);
4660 	dump_line(&s, "Event counters");
4661 	dump_line(&s, "--------------");
4662 
4663 	scx_read_events(sch, &events);
4664 	scx_dump_event(s, &events, SCX_EV_SELECT_CPU_FALLBACK);
4665 	scx_dump_event(s, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
4666 	scx_dump_event(s, &events, SCX_EV_DISPATCH_KEEP_LAST);
4667 	scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_EXITING);
4668 	scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
4669 	scx_dump_event(s, &events, SCX_EV_REFILL_SLICE_DFL);
4670 	scx_dump_event(s, &events, SCX_EV_BYPASS_DURATION);
4671 	scx_dump_event(s, &events, SCX_EV_BYPASS_DISPATCH);
4672 	scx_dump_event(s, &events, SCX_EV_BYPASS_ACTIVATE);
4673 
4674 	if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
4675 		memcpy(ei->dump + dump_len - sizeof(trunc_marker),
4676 		       trunc_marker, sizeof(trunc_marker));
4677 
4678 	spin_unlock_irqrestore(&dump_lock, flags);
4679 }
4680 
4681 static void scx_error_irq_workfn(struct irq_work *irq_work)
4682 {
4683 	struct scx_sched *sch = container_of(irq_work, struct scx_sched, error_irq_work);
4684 	struct scx_exit_info *ei = sch->exit_info;
4685 
4686 	if (ei->kind >= SCX_EXIT_ERROR)
4687 		scx_dump_state(ei, sch->ops.exit_dump_len);
4688 
4689 	kthread_queue_work(sch->helper, &sch->disable_work);
4690 }
4691 
4692 static bool scx_vexit(struct scx_sched *sch,
4693 		      enum scx_exit_kind kind, s64 exit_code,
4694 		      const char *fmt, va_list args)
4695 {
4696 	struct scx_exit_info *ei = sch->exit_info;
4697 
4698 	if (!scx_claim_exit(sch, kind))
4699 		return false;
4700 
4701 	ei->exit_code = exit_code;
4702 #ifdef CONFIG_STACKTRACE
4703 	if (kind >= SCX_EXIT_ERROR)
4704 		ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);
4705 #endif
4706 	vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
4707 
4708 	/*
4709 	 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again
4710 	 * in scx_disable_workfn().
4711 	 */
4712 	ei->kind = kind;
4713 	ei->reason = scx_exit_reason(ei->kind);
4714 
4715 	irq_work_queue(&sch->error_irq_work);
4716 	return true;
4717 }
4718 
4719 static int alloc_kick_syncs(void)
4720 {
4721 	int cpu;
4722 
4723 	/*
4724 	 * Allocate per-CPU arrays sized by nr_cpu_ids. Use kvzalloc as size
4725 	 * can exceed percpu allocator limits on large machines.
4726 	 */
4727 	for_each_possible_cpu(cpu) {
4728 		struct scx_kick_syncs **ksyncs = per_cpu_ptr(&scx_kick_syncs, cpu);
4729 		struct scx_kick_syncs *new_ksyncs;
4730 
4731 		WARN_ON_ONCE(rcu_access_pointer(*ksyncs));
4732 
4733 		new_ksyncs = kvzalloc_node(struct_size(new_ksyncs, syncs, nr_cpu_ids),
4734 					   GFP_KERNEL, cpu_to_node(cpu));
4735 		if (!new_ksyncs) {
4736 			free_kick_syncs();
4737 			return -ENOMEM;
4738 		}
4739 
4740 		rcu_assign_pointer(*ksyncs, new_ksyncs);
4741 	}
4742 
4743 	return 0;
4744 }
4745 
4746 static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
4747 {
4748 	struct scx_sched *sch;
4749 	int node, ret;
4750 
4751 	sch = kzalloc(sizeof(*sch), GFP_KERNEL);
4752 	if (!sch)
4753 		return ERR_PTR(-ENOMEM);
4754 
4755 	sch->exit_info = alloc_exit_info(ops->exit_dump_len);
4756 	if (!sch->exit_info) {
4757 		ret = -ENOMEM;
4758 		goto err_free_sch;
4759 	}
4760 
4761 	ret = rhashtable_init(&sch->dsq_hash, &dsq_hash_params);
4762 	if (ret < 0)
4763 		goto err_free_ei;
4764 
4765 	sch->global_dsqs = kcalloc(nr_node_ids, sizeof(sch->global_dsqs[0]),
4766 				   GFP_KERNEL);
4767 	if (!sch->global_dsqs) {
4768 		ret = -ENOMEM;
4769 		goto err_free_hash;
4770 	}
4771 
4772 	for_each_node_state(node, N_POSSIBLE) {
4773 		struct scx_dispatch_q *dsq;
4774 
4775 		dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node);
4776 		if (!dsq) {
4777 			ret = -ENOMEM;
4778 			goto err_free_gdsqs;
4779 		}
4780 
4781 		init_dsq(dsq, SCX_DSQ_GLOBAL);
4782 		sch->global_dsqs[node] = dsq;
4783 	}
4784 
4785 	sch->pcpu = alloc_percpu(struct scx_sched_pcpu);
4786 	if (!sch->pcpu)
4787 		goto err_free_gdsqs;
4788 
4789 	sch->helper = kthread_run_worker(0, "sched_ext_helper");
4790 	if (IS_ERR(sch->helper)) {
4791 		ret = PTR_ERR(sch->helper);
4792 		goto err_free_pcpu;
4793 	}
4794 
4795 	sched_set_fifo(sch->helper->task);
4796 
4797 	atomic_set(&sch->exit_kind, SCX_EXIT_NONE);
4798 	init_irq_work(&sch->error_irq_work, scx_error_irq_workfn);
4799 	kthread_init_work(&sch->disable_work, scx_disable_workfn);
4800 	sch->ops = *ops;
4801 	ops->priv = sch;
4802 
4803 	sch->kobj.kset = scx_kset;
4804 	ret = kobject_init_and_add(&sch->kobj, &scx_ktype, NULL, "root");
4805 	if (ret < 0)
4806 		goto err_stop_helper;
4807 
4808 	return sch;
4809 
4810 err_stop_helper:
4811 	kthread_destroy_worker(sch->helper);
4812 err_free_pcpu:
4813 	free_percpu(sch->pcpu);
4814 err_free_gdsqs:
4815 	for_each_node_state(node, N_POSSIBLE)
4816 		kfree(sch->global_dsqs[node]);
4817 	kfree(sch->global_dsqs);
4818 err_free_hash:
4819 	rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL);
4820 err_free_ei:
4821 	free_exit_info(sch->exit_info);
4822 err_free_sch:
4823 	kfree(sch);
4824 	return ERR_PTR(ret);
4825 }
4826 
4827 static int check_hotplug_seq(struct scx_sched *sch,
4828 			      const struct sched_ext_ops *ops)
4829 {
4830 	unsigned long long global_hotplug_seq;
4831 
4832 	/*
4833 	 * If a hotplug event has occurred between when a scheduler was
4834 	 * initialized, and when we were able to attach, exit and notify user
4835 	 * space about it.
4836 	 */
4837 	if (ops->hotplug_seq) {
4838 		global_hotplug_seq = atomic_long_read(&scx_hotplug_seq);
4839 		if (ops->hotplug_seq != global_hotplug_seq) {
4840 			scx_exit(sch, SCX_EXIT_UNREG_KERN,
4841 				 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
4842 				 "expected hotplug seq %llu did not match actual %llu",
4843 				 ops->hotplug_seq, global_hotplug_seq);
4844 			return -EBUSY;
4845 		}
4846 	}
4847 
4848 	return 0;
4849 }
4850 
4851 static int validate_ops(struct scx_sched *sch, const struct sched_ext_ops *ops)
4852 {
4853 	/*
4854 	 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the
4855 	 * ops.enqueue() callback isn't implemented.
4856 	 */
4857 	if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {
4858 		scx_error(sch, "SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
4859 		return -EINVAL;
4860 	}
4861 
4862 	/*
4863 	 * SCX_OPS_BUILTIN_IDLE_PER_NODE requires built-in CPU idle
4864 	 * selection policy to be enabled.
4865 	 */
4866 	if ((ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE) &&
4867 	    (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))) {
4868 		scx_error(sch, "SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled");
4869 		return -EINVAL;
4870 	}
4871 
4872 	if (ops->flags & SCX_OPS_HAS_CGROUP_WEIGHT)
4873 		pr_warn("SCX_OPS_HAS_CGROUP_WEIGHT is deprecated and a noop\n");
4874 
4875 	if (ops->cpu_acquire || ops->cpu_release)
4876 		pr_warn("ops->cpu_acquire/release() are deprecated, use sched_switch TP instead\n");
4877 
4878 	return 0;
4879 }
4880 
4881 static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
4882 {
4883 	struct scx_sched *sch;
4884 	struct scx_task_iter sti;
4885 	struct task_struct *p;
4886 	unsigned long timeout;
4887 	int i, cpu, ret;
4888 
4889 	if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
4890 			   cpu_possible_mask)) {
4891 		pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
4892 		return -EINVAL;
4893 	}
4894 
4895 	mutex_lock(&scx_enable_mutex);
4896 
4897 	if (scx_enable_state() != SCX_DISABLED) {
4898 		ret = -EBUSY;
4899 		goto err_unlock;
4900 	}
4901 
4902 	ret = alloc_kick_syncs();
4903 	if (ret)
4904 		goto err_unlock;
4905 
4906 	sch = scx_alloc_and_add_sched(ops);
4907 	if (IS_ERR(sch)) {
4908 		ret = PTR_ERR(sch);
4909 		goto err_free_ksyncs;
4910 	}
4911 
4912 	/*
4913 	 * Transition to ENABLING and clear exit info to arm the disable path.
4914 	 * Failure triggers full disabling from here on.
4915 	 */
4916 	WARN_ON_ONCE(scx_set_enable_state(SCX_ENABLING) != SCX_DISABLED);
4917 	WARN_ON_ONCE(scx_root);
4918 	if (WARN_ON_ONCE(READ_ONCE(scx_aborting)))
4919 		WRITE_ONCE(scx_aborting, false);
4920 
4921 	atomic_long_set(&scx_nr_rejected, 0);
4922 
4923 	for_each_possible_cpu(cpu)
4924 		cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;
4925 
4926 	/*
4927 	 * Keep CPUs stable during enable so that the BPF scheduler can track
4928 	 * online CPUs by watching ->on/offline_cpu() after ->init().
4929 	 */
4930 	cpus_read_lock();
4931 
4932 	/*
4933 	 * Make the scheduler instance visible. Must be inside cpus_read_lock().
4934 	 * See handle_hotplug().
4935 	 */
4936 	rcu_assign_pointer(scx_root, sch);
4937 
4938 	scx_idle_enable(ops);
4939 
4940 	if (sch->ops.init) {
4941 		ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init, NULL);
4942 		if (ret) {
4943 			ret = ops_sanitize_err(sch, "init", ret);
4944 			cpus_read_unlock();
4945 			scx_error(sch, "ops.init() failed (%d)", ret);
4946 			goto err_disable;
4947 		}
4948 		sch->exit_info->flags |= SCX_EFLAG_INITIALIZED;
4949 	}
4950 
4951 	for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
4952 		if (((void (**)(void))ops)[i])
4953 			set_bit(i, sch->has_op);
4954 
4955 	ret = check_hotplug_seq(sch, ops);
4956 	if (ret) {
4957 		cpus_read_unlock();
4958 		goto err_disable;
4959 	}
4960 	scx_idle_update_selcpu_topology(ops);
4961 
4962 	cpus_read_unlock();
4963 
4964 	ret = validate_ops(sch, ops);
4965 	if (ret)
4966 		goto err_disable;
4967 
4968 	WARN_ON_ONCE(scx_dsp_ctx);
4969 	scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
4970 	scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
4971 						   scx_dsp_max_batch),
4972 				     __alignof__(struct scx_dsp_ctx));
4973 	if (!scx_dsp_ctx) {
4974 		ret = -ENOMEM;
4975 		goto err_disable;
4976 	}
4977 
4978 	if (ops->timeout_ms)
4979 		timeout = msecs_to_jiffies(ops->timeout_ms);
4980 	else
4981 		timeout = SCX_WATCHDOG_MAX_TIMEOUT;
4982 
4983 	WRITE_ONCE(scx_watchdog_timeout, timeout);
4984 	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
4985 	queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
4986 			   scx_watchdog_timeout / 2);
4987 
4988 	/*
4989 	 * Once __scx_enabled is set, %current can be switched to SCX anytime.
4990 	 * This can lead to stalls as some BPF schedulers (e.g. userspace
4991 	 * scheduling) may not function correctly before all tasks are switched.
4992 	 * Init in bypass mode to guarantee forward progress.
4993 	 */
4994 	scx_bypass(true);
4995 	scx_bypassed_for_enable = true;
4996 
4997 	for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
4998 		if (((void (**)(void))ops)[i])
4999 			set_bit(i, sch->has_op);
5000 
5001 	if (sch->ops.cpu_acquire || sch->ops.cpu_release)
5002 		sch->ops.flags |= SCX_OPS_HAS_CPU_PREEMPT;
5003 
5004 	/*
5005 	 * Lock out forks, cgroup on/offlining and moves before opening the
5006 	 * floodgate so that they don't wander into the operations prematurely.
5007 	 */
5008 	percpu_down_write(&scx_fork_rwsem);
5009 
5010 	WARN_ON_ONCE(scx_init_task_enabled);
5011 	scx_init_task_enabled = true;
5012 
5013 	/*
5014 	 * Enable ops for every task. Fork is excluded by scx_fork_rwsem
5015 	 * preventing new tasks from being added. No need to exclude tasks
5016 	 * leaving as sched_ext_free() can handle both prepped and enabled
5017 	 * tasks. Prep all tasks first and then enable them with preemption
5018 	 * disabled.
5019 	 *
5020 	 * All cgroups should be initialized before scx_init_task() so that the
5021 	 * BPF scheduler can reliably track each task's cgroup membership from
5022 	 * scx_init_task(). Lock out cgroup on/offlining and task migrations
5023 	 * while tasks are being initialized so that scx_cgroup_can_attach()
5024 	 * never sees uninitialized tasks.
5025 	 */
5026 	scx_cgroup_lock();
5027 	ret = scx_cgroup_init(sch);
5028 	if (ret)
5029 		goto err_disable_unlock_all;
5030 
5031 	scx_task_iter_start(&sti);
5032 	while ((p = scx_task_iter_next_locked(&sti))) {
5033 		/*
5034 		 * @p may already be dead, have lost all its usages counts and
5035 		 * be waiting for RCU grace period before being freed. @p can't
5036 		 * be initialized for SCX in such cases and should be ignored.
5037 		 */
5038 		if (!tryget_task_struct(p))
5039 			continue;
5040 
5041 		scx_task_iter_unlock(&sti);
5042 
5043 		ret = scx_init_task(p, task_group(p), false);
5044 		if (ret) {
5045 			put_task_struct(p);
5046 			scx_task_iter_stop(&sti);
5047 			scx_error(sch, "ops.init_task() failed (%d) for %s[%d]",
5048 				  ret, p->comm, p->pid);
5049 			goto err_disable_unlock_all;
5050 		}
5051 
5052 		scx_set_task_state(p, SCX_TASK_READY);
5053 
5054 		put_task_struct(p);
5055 	}
5056 	scx_task_iter_stop(&sti);
5057 	scx_cgroup_unlock();
5058 	percpu_up_write(&scx_fork_rwsem);
5059 
5060 	/*
5061 	 * All tasks are READY. It's safe to turn on scx_enabled() and switch
5062 	 * all eligible tasks.
5063 	 */
5064 	WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
5065 	static_branch_enable(&__scx_enabled);
5066 
5067 	/*
5068 	 * We're fully committed and can't fail. The task READY -> ENABLED
5069 	 * transitions here are synchronized against sched_ext_free() through
5070 	 * scx_tasks_lock.
5071 	 */
5072 	percpu_down_write(&scx_fork_rwsem);
5073 	scx_task_iter_start(&sti);
5074 	while ((p = scx_task_iter_next_locked(&sti))) {
5075 		unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
5076 		const struct sched_class *old_class = p->sched_class;
5077 		const struct sched_class *new_class = scx_setscheduler_class(p);
5078 
5079 		if (scx_get_task_state(p) != SCX_TASK_READY)
5080 			continue;
5081 
5082 		if (old_class != new_class)
5083 			queue_flags |= DEQUEUE_CLASS;
5084 
5085 		scoped_guard (sched_change, p, queue_flags) {
5086 			p->scx.slice = READ_ONCE(scx_slice_dfl);
5087 			p->sched_class = new_class;
5088 		}
5089 	}
5090 	scx_task_iter_stop(&sti);
5091 	percpu_up_write(&scx_fork_rwsem);
5092 
5093 	scx_bypassed_for_enable = false;
5094 	scx_bypass(false);
5095 
5096 	if (!scx_tryset_enable_state(SCX_ENABLED, SCX_ENABLING)) {
5097 		WARN_ON_ONCE(atomic_read(&sch->exit_kind) == SCX_EXIT_NONE);
5098 		goto err_disable;
5099 	}
5100 
5101 	if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
5102 		static_branch_enable(&__scx_switched_all);
5103 
5104 	pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
5105 		sch->ops.name, scx_switched_all() ? "" : " (partial)");
5106 	kobject_uevent(&sch->kobj, KOBJ_ADD);
5107 	mutex_unlock(&scx_enable_mutex);
5108 
5109 	atomic_long_inc(&scx_enable_seq);
5110 
5111 	return 0;
5112 
5113 err_free_ksyncs:
5114 	free_kick_syncs();
5115 err_unlock:
5116 	mutex_unlock(&scx_enable_mutex);
5117 	return ret;
5118 
5119 err_disable_unlock_all:
5120 	scx_cgroup_unlock();
5121 	percpu_up_write(&scx_fork_rwsem);
5122 	/* we'll soon enter disable path, keep bypass on */
5123 err_disable:
5124 	mutex_unlock(&scx_enable_mutex);
5125 	/*
5126 	 * Returning an error code here would not pass all the error information
5127 	 * to userspace. Record errno using scx_error() for cases scx_error()
5128 	 * wasn't already invoked and exit indicating success so that the error
5129 	 * is notified through ops.exit() with all the details.
5130 	 *
5131 	 * Flush scx_disable_work to ensure that error is reported before init
5132 	 * completion. sch's base reference will be put by bpf_scx_unreg().
5133 	 */
5134 	scx_error(sch, "scx_enable() failed (%d)", ret);
5135 	kthread_flush_work(&sch->disable_work);
5136 	return 0;
5137 }
5138 
5139 
5140 /********************************************************************************
5141  * bpf_struct_ops plumbing.
5142  */
5143 #include <linux/bpf_verifier.h>
5144 #include <linux/bpf.h>
5145 #include <linux/btf.h>
5146 
5147 static const struct btf_type *task_struct_type;
5148 
5149 static bool bpf_scx_is_valid_access(int off, int size,
5150 				    enum bpf_access_type type,
5151 				    const struct bpf_prog *prog,
5152 				    struct bpf_insn_access_aux *info)
5153 {
5154 	if (type != BPF_READ)
5155 		return false;
5156 	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
5157 		return false;
5158 	if (off % size != 0)
5159 		return false;
5160 
5161 	return btf_ctx_access(off, size, type, prog, info);
5162 }
5163 
5164 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
5165 				     const struct bpf_reg_state *reg, int off,
5166 				     int size)
5167 {
5168 	const struct btf_type *t;
5169 
5170 	t = btf_type_by_id(reg->btf, reg->btf_id);
5171 	if (t == task_struct_type) {
5172 		if (off >= offsetof(struct task_struct, scx.slice) &&
5173 		    off + size <= offsetofend(struct task_struct, scx.slice))
5174 			return SCALAR_VALUE;
5175 		if (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
5176 		    off + size <= offsetofend(struct task_struct, scx.dsq_vtime))
5177 			return SCALAR_VALUE;
5178 		if (off >= offsetof(struct task_struct, scx.disallow) &&
5179 		    off + size <= offsetofend(struct task_struct, scx.disallow))
5180 			return SCALAR_VALUE;
5181 	}
5182 
5183 	return -EACCES;
5184 }
5185 
5186 static const struct bpf_verifier_ops bpf_scx_verifier_ops = {
5187 	.get_func_proto = bpf_base_func_proto,
5188 	.is_valid_access = bpf_scx_is_valid_access,
5189 	.btf_struct_access = bpf_scx_btf_struct_access,
5190 };
5191 
5192 static int bpf_scx_init_member(const struct btf_type *t,
5193 			       const struct btf_member *member,
5194 			       void *kdata, const void *udata)
5195 {
5196 	const struct sched_ext_ops *uops = udata;
5197 	struct sched_ext_ops *ops = kdata;
5198 	u32 moff = __btf_member_bit_offset(t, member) / 8;
5199 	int ret;
5200 
5201 	switch (moff) {
5202 	case offsetof(struct sched_ext_ops, dispatch_max_batch):
5203 		if (*(u32 *)(udata + moff) > INT_MAX)
5204 			return -E2BIG;
5205 		ops->dispatch_max_batch = *(u32 *)(udata + moff);
5206 		return 1;
5207 	case offsetof(struct sched_ext_ops, flags):
5208 		if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS)
5209 			return -EINVAL;
5210 		ops->flags = *(u64 *)(udata + moff);
5211 		return 1;
5212 	case offsetof(struct sched_ext_ops, name):
5213 		ret = bpf_obj_name_cpy(ops->name, uops->name,
5214 				       sizeof(ops->name));
5215 		if (ret < 0)
5216 			return ret;
5217 		if (ret == 0)
5218 			return -EINVAL;
5219 		return 1;
5220 	case offsetof(struct sched_ext_ops, timeout_ms):
5221 		if (msecs_to_jiffies(*(u32 *)(udata + moff)) >
5222 		    SCX_WATCHDOG_MAX_TIMEOUT)
5223 			return -E2BIG;
5224 		ops->timeout_ms = *(u32 *)(udata + moff);
5225 		return 1;
5226 	case offsetof(struct sched_ext_ops, exit_dump_len):
5227 		ops->exit_dump_len =
5228 			*(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN;
5229 		return 1;
5230 	case offsetof(struct sched_ext_ops, hotplug_seq):
5231 		ops->hotplug_seq = *(u64 *)(udata + moff);
5232 		return 1;
5233 	}
5234 
5235 	return 0;
5236 }
5237 
5238 static int bpf_scx_check_member(const struct btf_type *t,
5239 				const struct btf_member *member,
5240 				const struct bpf_prog *prog)
5241 {
5242 	u32 moff = __btf_member_bit_offset(t, member) / 8;
5243 
5244 	switch (moff) {
5245 	case offsetof(struct sched_ext_ops, init_task):
5246 #ifdef CONFIG_EXT_GROUP_SCHED
5247 	case offsetof(struct sched_ext_ops, cgroup_init):
5248 	case offsetof(struct sched_ext_ops, cgroup_exit):
5249 	case offsetof(struct sched_ext_ops, cgroup_prep_move):
5250 #endif
5251 	case offsetof(struct sched_ext_ops, cpu_online):
5252 	case offsetof(struct sched_ext_ops, cpu_offline):
5253 	case offsetof(struct sched_ext_ops, init):
5254 	case offsetof(struct sched_ext_ops, exit):
5255 		break;
5256 	default:
5257 		if (prog->sleepable)
5258 			return -EINVAL;
5259 	}
5260 
5261 	return 0;
5262 }
5263 
5264 static int bpf_scx_reg(void *kdata, struct bpf_link *link)
5265 {
5266 	return scx_enable(kdata, link);
5267 }
5268 
5269 static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
5270 {
5271 	struct sched_ext_ops *ops = kdata;
5272 	struct scx_sched *sch = ops->priv;
5273 
5274 	scx_disable(SCX_EXIT_UNREG);
5275 	kthread_flush_work(&sch->disable_work);
5276 	kobject_put(&sch->kobj);
5277 }
5278 
5279 static int bpf_scx_init(struct btf *btf)
5280 {
5281 	task_struct_type = btf_type_by_id(btf, btf_tracing_ids[BTF_TRACING_TYPE_TASK]);
5282 
5283 	return 0;
5284 }
5285 
5286 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)
5287 {
5288 	/*
5289 	 * sched_ext does not support updating the actively-loaded BPF
5290 	 * scheduler, as registering a BPF scheduler can always fail if the
5291 	 * scheduler returns an error code for e.g. ops.init(), ops.init_task(),
5292 	 * etc. Similarly, we can always race with unregistration happening
5293 	 * elsewhere, such as with sysrq.
5294 	 */
5295 	return -EOPNOTSUPP;
5296 }
5297 
5298 static int bpf_scx_validate(void *kdata)
5299 {
5300 	return 0;
5301 }
5302 
5303 static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
5304 static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {}
5305 static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {}
5306 static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *prev__nullable) {}
5307 static void sched_ext_ops__tick(struct task_struct *p) {}
5308 static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {}
5309 static void sched_ext_ops__running(struct task_struct *p) {}
5310 static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {}
5311 static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {}
5312 static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to__nullable) { return false; }
5313 static bool sched_ext_ops__core_sched_before(struct task_struct *a, struct task_struct *b) { return false; }
5314 static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {}
5315 static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {}
5316 static void sched_ext_ops__update_idle(s32 cpu, bool idle) {}
5317 static void sched_ext_ops__cpu_acquire(s32 cpu, struct scx_cpu_acquire_args *args) {}
5318 static void sched_ext_ops__cpu_release(s32 cpu, struct scx_cpu_release_args *args) {}
5319 static s32 sched_ext_ops__init_task(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
5320 static void sched_ext_ops__exit_task(struct task_struct *p, struct scx_exit_task_args *args) {}
5321 static void sched_ext_ops__enable(struct task_struct *p) {}
5322 static void sched_ext_ops__disable(struct task_struct *p) {}
5323 #ifdef CONFIG_EXT_GROUP_SCHED
5324 static s32 sched_ext_ops__cgroup_init(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
5325 static void sched_ext_ops__cgroup_exit(struct cgroup *cgrp) {}
5326 static s32 sched_ext_ops__cgroup_prep_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
5327 static void sched_ext_ops__cgroup_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
5328 static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
5329 static void sched_ext_ops__cgroup_set_weight(struct cgroup *cgrp, u32 weight) {}
5330 static void sched_ext_ops__cgroup_set_bandwidth(struct cgroup *cgrp, u64 period_us, u64 quota_us, u64 burst_us) {}
5331 static void sched_ext_ops__cgroup_set_idle(struct cgroup *cgrp, bool idle) {}
5332 #endif
5333 static void sched_ext_ops__cpu_online(s32 cpu) {}
5334 static void sched_ext_ops__cpu_offline(s32 cpu) {}
5335 static s32 sched_ext_ops__init(void) { return -EINVAL; }
5336 static void sched_ext_ops__exit(struct scx_exit_info *info) {}
5337 static void sched_ext_ops__dump(struct scx_dump_ctx *ctx) {}
5338 static void sched_ext_ops__dump_cpu(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
5339 static void sched_ext_ops__dump_task(struct scx_dump_ctx *ctx, struct task_struct *p) {}
5340 
5341 static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
5342 	.select_cpu		= sched_ext_ops__select_cpu,
5343 	.enqueue		= sched_ext_ops__enqueue,
5344 	.dequeue		= sched_ext_ops__dequeue,
5345 	.dispatch		= sched_ext_ops__dispatch,
5346 	.tick			= sched_ext_ops__tick,
5347 	.runnable		= sched_ext_ops__runnable,
5348 	.running		= sched_ext_ops__running,
5349 	.stopping		= sched_ext_ops__stopping,
5350 	.quiescent		= sched_ext_ops__quiescent,
5351 	.yield			= sched_ext_ops__yield,
5352 	.core_sched_before	= sched_ext_ops__core_sched_before,
5353 	.set_weight		= sched_ext_ops__set_weight,
5354 	.set_cpumask		= sched_ext_ops__set_cpumask,
5355 	.update_idle		= sched_ext_ops__update_idle,
5356 	.cpu_acquire		= sched_ext_ops__cpu_acquire,
5357 	.cpu_release		= sched_ext_ops__cpu_release,
5358 	.init_task		= sched_ext_ops__init_task,
5359 	.exit_task		= sched_ext_ops__exit_task,
5360 	.enable			= sched_ext_ops__enable,
5361 	.disable		= sched_ext_ops__disable,
5362 #ifdef CONFIG_EXT_GROUP_SCHED
5363 	.cgroup_init		= sched_ext_ops__cgroup_init,
5364 	.cgroup_exit		= sched_ext_ops__cgroup_exit,
5365 	.cgroup_prep_move	= sched_ext_ops__cgroup_prep_move,
5366 	.cgroup_move		= sched_ext_ops__cgroup_move,
5367 	.cgroup_cancel_move	= sched_ext_ops__cgroup_cancel_move,
5368 	.cgroup_set_weight	= sched_ext_ops__cgroup_set_weight,
5369 	.cgroup_set_bandwidth	= sched_ext_ops__cgroup_set_bandwidth,
5370 	.cgroup_set_idle	= sched_ext_ops__cgroup_set_idle,
5371 #endif
5372 	.cpu_online		= sched_ext_ops__cpu_online,
5373 	.cpu_offline		= sched_ext_ops__cpu_offline,
5374 	.init			= sched_ext_ops__init,
5375 	.exit			= sched_ext_ops__exit,
5376 	.dump			= sched_ext_ops__dump,
5377 	.dump_cpu		= sched_ext_ops__dump_cpu,
5378 	.dump_task		= sched_ext_ops__dump_task,
5379 };
5380 
5381 static struct bpf_struct_ops bpf_sched_ext_ops = {
5382 	.verifier_ops = &bpf_scx_verifier_ops,
5383 	.reg = bpf_scx_reg,
5384 	.unreg = bpf_scx_unreg,
5385 	.check_member = bpf_scx_check_member,
5386 	.init_member = bpf_scx_init_member,
5387 	.init = bpf_scx_init,
5388 	.update = bpf_scx_update,
5389 	.validate = bpf_scx_validate,
5390 	.name = "sched_ext_ops",
5391 	.owner = THIS_MODULE,
5392 	.cfi_stubs = &__bpf_ops_sched_ext_ops
5393 };
5394 
5395 
5396 /********************************************************************************
5397  * System integration and init.
5398  */
5399 
5400 static void sysrq_handle_sched_ext_reset(u8 key)
5401 {
5402 	scx_disable(SCX_EXIT_SYSRQ);
5403 }
5404 
5405 static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
5406 	.handler	= sysrq_handle_sched_ext_reset,
5407 	.help_msg	= "reset-sched-ext(S)",
5408 	.action_msg	= "Disable sched_ext and revert all tasks to CFS",
5409 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
5410 };
5411 
5412 static void sysrq_handle_sched_ext_dump(u8 key)
5413 {
5414 	struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" };
5415 
5416 	if (scx_enabled())
5417 		scx_dump_state(&ei, 0);
5418 }
5419 
5420 static const struct sysrq_key_op sysrq_sched_ext_dump_op = {
5421 	.handler	= sysrq_handle_sched_ext_dump,
5422 	.help_msg	= "dump-sched-ext(D)",
5423 	.action_msg	= "Trigger sched_ext debug dump",
5424 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
5425 };
5426 
5427 static bool can_skip_idle_kick(struct rq *rq)
5428 {
5429 	lockdep_assert_rq_held(rq);
5430 
5431 	/*
5432 	 * We can skip idle kicking if @rq is going to go through at least one
5433 	 * full SCX scheduling cycle before going idle. Just checking whether
5434 	 * curr is not idle is insufficient because we could be racing
5435 	 * balance_one() trying to pull the next task from a remote rq, which
5436 	 * may fail, and @rq may become idle afterwards.
5437 	 *
5438 	 * The race window is small and we don't and can't guarantee that @rq is
5439 	 * only kicked while idle anyway. Skip only when sure.
5440 	 */
5441 	return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
5442 }
5443 
5444 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *ksyncs)
5445 {
5446 	struct rq *rq = cpu_rq(cpu);
5447 	struct scx_rq *this_scx = &this_rq->scx;
5448 	const struct sched_class *cur_class;
5449 	bool should_wait = false;
5450 	unsigned long flags;
5451 
5452 	raw_spin_rq_lock_irqsave(rq, flags);
5453 	cur_class = rq->curr->sched_class;
5454 
5455 	/*
5456 	 * During CPU hotplug, a CPU may depend on kicking itself to make
5457 	 * forward progress. Allow kicking self regardless of online state. If
5458 	 * @cpu is running a higher class task, we have no control over @cpu.
5459 	 * Skip kicking.
5460 	 */
5461 	if ((cpu_online(cpu) || cpu == cpu_of(this_rq)) &&
5462 	    !sched_class_above(cur_class, &ext_sched_class)) {
5463 		if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
5464 			if (cur_class == &ext_sched_class)
5465 				rq->curr->scx.slice = 0;
5466 			cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
5467 		}
5468 
5469 		if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
5470 			if (cur_class == &ext_sched_class) {
5471 				ksyncs[cpu] = rq->scx.kick_sync;
5472 				should_wait = true;
5473 			} else {
5474 				cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
5475 			}
5476 		}
5477 
5478 		resched_curr(rq);
5479 	} else {
5480 		cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
5481 		cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
5482 	}
5483 
5484 	raw_spin_rq_unlock_irqrestore(rq, flags);
5485 
5486 	return should_wait;
5487 }
5488 
5489 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
5490 {
5491 	struct rq *rq = cpu_rq(cpu);
5492 	unsigned long flags;
5493 
5494 	raw_spin_rq_lock_irqsave(rq, flags);
5495 
5496 	if (!can_skip_idle_kick(rq) &&
5497 	    (cpu_online(cpu) || cpu == cpu_of(this_rq)))
5498 		resched_curr(rq);
5499 
5500 	raw_spin_rq_unlock_irqrestore(rq, flags);
5501 }
5502 
5503 static void kick_cpus_irq_workfn(struct irq_work *irq_work)
5504 {
5505 	struct rq *this_rq = this_rq();
5506 	struct scx_rq *this_scx = &this_rq->scx;
5507 	struct scx_kick_syncs __rcu *ksyncs_pcpu = __this_cpu_read(scx_kick_syncs);
5508 	bool should_wait = false;
5509 	unsigned long *ksyncs;
5510 	s32 cpu;
5511 
5512 	if (unlikely(!ksyncs_pcpu)) {
5513 		pr_warn_once("kick_cpus_irq_workfn() called with NULL scx_kick_syncs");
5514 		return;
5515 	}
5516 
5517 	ksyncs = rcu_dereference_bh(ksyncs_pcpu)->syncs;
5518 
5519 	for_each_cpu(cpu, this_scx->cpus_to_kick) {
5520 		should_wait |= kick_one_cpu(cpu, this_rq, ksyncs);
5521 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
5522 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
5523 	}
5524 
5525 	for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
5526 		kick_one_cpu_if_idle(cpu, this_rq);
5527 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
5528 	}
5529 
5530 	if (!should_wait)
5531 		return;
5532 
5533 	for_each_cpu(cpu, this_scx->cpus_to_wait) {
5534 		unsigned long *wait_kick_sync = &cpu_rq(cpu)->scx.kick_sync;
5535 
5536 		/*
5537 		 * Busy-wait until the task running at the time of kicking is no
5538 		 * longer running. This can be used to implement e.g. core
5539 		 * scheduling.
5540 		 *
5541 		 * smp_cond_load_acquire() pairs with store_releases in
5542 		 * pick_task_scx() and put_prev_task_scx(). The former breaks
5543 		 * the wait if SCX's scheduling path is entered even if the same
5544 		 * task is picked subsequently. The latter is necessary to break
5545 		 * the wait when $cpu is taken by a higher sched class.
5546 		 */
5547 		if (cpu != cpu_of(this_rq))
5548 			smp_cond_load_acquire(wait_kick_sync, VAL != ksyncs[cpu]);
5549 
5550 		cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
5551 	}
5552 }
5553 
5554 /**
5555  * print_scx_info - print out sched_ext scheduler state
5556  * @log_lvl: the log level to use when printing
5557  * @p: target task
5558  *
5559  * If a sched_ext scheduler is enabled, print the name and state of the
5560  * scheduler. If @p is on sched_ext, print further information about the task.
5561  *
5562  * This function can be safely called on any task as long as the task_struct
5563  * itself is accessible. While safe, this function isn't synchronized and may
5564  * print out mixups or garbages of limited length.
5565  */
5566 void print_scx_info(const char *log_lvl, struct task_struct *p)
5567 {
5568 	struct scx_sched *sch = scx_root;
5569 	enum scx_enable_state state = scx_enable_state();
5570 	const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
5571 	char runnable_at_buf[22] = "?";
5572 	struct sched_class *class;
5573 	unsigned long runnable_at;
5574 
5575 	if (state == SCX_DISABLED)
5576 		return;
5577 
5578 	/*
5579 	 * Carefully check if the task was running on sched_ext, and then
5580 	 * carefully copy the time it's been runnable, and its state.
5581 	 */
5582 	if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
5583 	    class != &ext_sched_class) {
5584 		printk("%sSched_ext: %s (%s%s)", log_lvl, sch->ops.name,
5585 		       scx_enable_state_str[state], all);
5586 		return;
5587 	}
5588 
5589 	if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
5590 				      sizeof(runnable_at)))
5591 		scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
5592 			  jiffies_delta_msecs(runnable_at, jiffies));
5593 
5594 	/* print everything onto one line to conserve console space */
5595 	printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
5596 	       log_lvl, sch->ops.name, scx_enable_state_str[state], all,
5597 	       runnable_at_buf);
5598 }
5599 
5600 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
5601 {
5602 	/*
5603 	 * SCX schedulers often have userspace components which are sometimes
5604 	 * involved in critial scheduling paths. PM operations involve freezing
5605 	 * userspace which can lead to scheduling misbehaviors including stalls.
5606 	 * Let's bypass while PM operations are in progress.
5607 	 */
5608 	switch (event) {
5609 	case PM_HIBERNATION_PREPARE:
5610 	case PM_SUSPEND_PREPARE:
5611 	case PM_RESTORE_PREPARE:
5612 		scx_bypass(true);
5613 		break;
5614 	case PM_POST_HIBERNATION:
5615 	case PM_POST_SUSPEND:
5616 	case PM_POST_RESTORE:
5617 		scx_bypass(false);
5618 		break;
5619 	}
5620 
5621 	return NOTIFY_OK;
5622 }
5623 
5624 static struct notifier_block scx_pm_notifier = {
5625 	.notifier_call = scx_pm_handler,
5626 };
5627 
5628 void __init init_sched_ext_class(void)
5629 {
5630 	s32 cpu, v;
5631 
5632 	/*
5633 	 * The following is to prevent the compiler from optimizing out the enum
5634 	 * definitions so that BPF scheduler implementations can use them
5635 	 * through the generated vmlinux.h.
5636 	 */
5637 	WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
5638 		   SCX_TG_ONLINE);
5639 
5640 	scx_idle_init_masks();
5641 
5642 	for_each_possible_cpu(cpu) {
5643 		struct rq *rq = cpu_rq(cpu);
5644 		int  n = cpu_to_node(cpu);
5645 
5646 		init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
5647 		init_dsq(&rq->scx.bypass_dsq, SCX_DSQ_BYPASS);
5648 		INIT_LIST_HEAD(&rq->scx.runnable_list);
5649 		INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
5650 
5651 		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick, GFP_KERNEL, n));
5652 		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n));
5653 		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n));
5654 		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n));
5655 		rq->scx.deferred_irq_work = IRQ_WORK_INIT_HARD(deferred_irq_workfn);
5656 		rq->scx.kick_cpus_irq_work = IRQ_WORK_INIT_HARD(kick_cpus_irq_workfn);
5657 
5658 		if (cpu_online(cpu))
5659 			cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
5660 	}
5661 
5662 	register_sysrq_key('S', &sysrq_sched_ext_reset_op);
5663 	register_sysrq_key('D', &sysrq_sched_ext_dump_op);
5664 	INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
5665 }
5666 
5667 
5668 /********************************************************************************
5669  * Helpers that can be called from the BPF scheduler.
5670  */
5671 static bool scx_dsq_insert_preamble(struct scx_sched *sch, struct task_struct *p,
5672 				    u64 enq_flags)
5673 {
5674 	if (!scx_kf_allowed(sch, SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
5675 		return false;
5676 
5677 	lockdep_assert_irqs_disabled();
5678 
5679 	if (unlikely(!p)) {
5680 		scx_error(sch, "called with NULL task");
5681 		return false;
5682 	}
5683 
5684 	if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
5685 		scx_error(sch, "invalid enq_flags 0x%llx", enq_flags);
5686 		return false;
5687 	}
5688 
5689 	return true;
5690 }
5691 
5692 static void scx_dsq_insert_commit(struct scx_sched *sch, struct task_struct *p,
5693 				  u64 dsq_id, u64 enq_flags)
5694 {
5695 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
5696 	struct task_struct *ddsp_task;
5697 
5698 	ddsp_task = __this_cpu_read(direct_dispatch_task);
5699 	if (ddsp_task) {
5700 		mark_direct_dispatch(sch, ddsp_task, p, dsq_id, enq_flags);
5701 		return;
5702 	}
5703 
5704 	if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
5705 		scx_error(sch, "dispatch buffer overflow");
5706 		return;
5707 	}
5708 
5709 	dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
5710 		.task = p,
5711 		.qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
5712 		.dsq_id = dsq_id,
5713 		.enq_flags = enq_flags,
5714 	};
5715 }
5716 
5717 __bpf_kfunc_start_defs();
5718 
5719 /**
5720  * scx_bpf_dsq_insert - Insert a task into the FIFO queue of a DSQ
5721  * @p: task_struct to insert
5722  * @dsq_id: DSQ to insert into
5723  * @slice: duration @p can run for in nsecs, 0 to keep the current value
5724  * @enq_flags: SCX_ENQ_*
5725  *
5726  * Insert @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe to
5727  * call this function spuriously. Can be called from ops.enqueue(),
5728  * ops.select_cpu(), and ops.dispatch().
5729  *
5730  * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
5731  * and @p must match the task being enqueued.
5732  *
5733  * When called from ops.select_cpu(), @enq_flags and @dsp_id are stored, and @p
5734  * will be directly inserted into the corresponding dispatch queue after
5735  * ops.select_cpu() returns. If @p is inserted into SCX_DSQ_LOCAL, it will be
5736  * inserted into the local DSQ of the CPU returned by ops.select_cpu().
5737  * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
5738  * task is inserted.
5739  *
5740  * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
5741  * and this function can be called upto ops.dispatch_max_batch times to insert
5742  * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of the
5743  * remaining slots. scx_bpf_dsq_move_to_local() flushes the batch and resets the
5744  * counter.
5745  *
5746  * This function doesn't have any locking restrictions and may be called under
5747  * BPF locks (in the future when BPF introduces more flexible locking).
5748  *
5749  * @p is allowed to run for @slice. The scheduling path is triggered on slice
5750  * exhaustion. If zero, the current residual slice is maintained. If
5751  * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
5752  * scx_bpf_kick_cpu() to trigger scheduling.
5753  *
5754  * Returns %true on successful insertion, %false on failure. On the root
5755  * scheduler, %false return triggers scheduler abort and the caller doesn't need
5756  * to check the return value.
5757  */
5758 __bpf_kfunc bool scx_bpf_dsq_insert___v2(struct task_struct *p, u64 dsq_id,
5759 					 u64 slice, u64 enq_flags)
5760 {
5761 	struct scx_sched *sch;
5762 
5763 	guard(rcu)();
5764 	sch = rcu_dereference(scx_root);
5765 	if (unlikely(!sch))
5766 		return false;
5767 
5768 	if (!scx_dsq_insert_preamble(sch, p, enq_flags))
5769 		return false;
5770 
5771 	if (slice)
5772 		p->scx.slice = slice;
5773 	else
5774 		p->scx.slice = p->scx.slice ?: 1;
5775 
5776 	scx_dsq_insert_commit(sch, p, dsq_id, enq_flags);
5777 
5778 	return true;
5779 }
5780 
5781 /*
5782  * COMPAT: Will be removed in v6.23 along with the ___v2 suffix.
5783  */
5784 __bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id,
5785 					     u64 slice, u64 enq_flags)
5786 {
5787 	scx_bpf_dsq_insert___v2(p, dsq_id, slice, enq_flags);
5788 }
5789 
5790 static bool scx_dsq_insert_vtime(struct scx_sched *sch, struct task_struct *p,
5791 				 u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags)
5792 {
5793 	if (!scx_dsq_insert_preamble(sch, p, enq_flags))
5794 		return false;
5795 
5796 	if (slice)
5797 		p->scx.slice = slice;
5798 	else
5799 		p->scx.slice = p->scx.slice ?: 1;
5800 
5801 	p->scx.dsq_vtime = vtime;
5802 
5803 	scx_dsq_insert_commit(sch, p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
5804 
5805 	return true;
5806 }
5807 
5808 struct scx_bpf_dsq_insert_vtime_args {
5809 	/* @p can't be packed together as KF_RCU is not transitive */
5810 	u64			dsq_id;
5811 	u64			slice;
5812 	u64			vtime;
5813 	u64			enq_flags;
5814 };
5815 
5816 /**
5817  * __scx_bpf_dsq_insert_vtime - Arg-wrapped vtime DSQ insertion
5818  * @p: task_struct to insert
5819  * @args: struct containing the rest of the arguments
5820  *       @args->dsq_id: DSQ to insert into
5821  *       @args->slice: duration @p can run for in nsecs, 0 to keep the current value
5822  *       @args->vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
5823  *       @args->enq_flags: SCX_ENQ_*
5824  *
5825  * Wrapper kfunc that takes arguments via struct to work around BPF's 5 argument
5826  * limit. BPF programs should use scx_bpf_dsq_insert_vtime() which is provided
5827  * as an inline wrapper in common.bpf.h.
5828  *
5829  * Insert @p into the vtime priority queue of the DSQ identified by
5830  * @args->dsq_id. Tasks queued into the priority queue are ordered by
5831  * @args->vtime. All other aspects are identical to scx_bpf_dsq_insert().
5832  *
5833  * @args->vtime ordering is according to time_before64() which considers
5834  * wrapping. A numerically larger vtime may indicate an earlier position in the
5835  * ordering and vice-versa.
5836  *
5837  * A DSQ can only be used as a FIFO or priority queue at any given time and this
5838  * function must not be called on a DSQ which already has one or more FIFO tasks
5839  * queued and vice-versa. Also, the built-in DSQs (SCX_DSQ_LOCAL and
5840  * SCX_DSQ_GLOBAL) cannot be used as priority queues.
5841  *
5842  * Returns %true on successful insertion, %false on failure. On the root
5843  * scheduler, %false return triggers scheduler abort and the caller doesn't need
5844  * to check the return value.
5845  */
5846 __bpf_kfunc bool
5847 __scx_bpf_dsq_insert_vtime(struct task_struct *p,
5848 			   struct scx_bpf_dsq_insert_vtime_args *args)
5849 {
5850 	struct scx_sched *sch;
5851 
5852 	guard(rcu)();
5853 
5854 	sch = rcu_dereference(scx_root);
5855 	if (unlikely(!sch))
5856 		return false;
5857 
5858 	return scx_dsq_insert_vtime(sch, p, args->dsq_id, args->slice,
5859 				    args->vtime, args->enq_flags);
5860 }
5861 
5862 /*
5863  * COMPAT: Will be removed in v6.23.
5864  */
5865 __bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id,
5866 					  u64 slice, u64 vtime, u64 enq_flags)
5867 {
5868 	struct scx_sched *sch;
5869 
5870 	guard(rcu)();
5871 
5872 	sch = rcu_dereference(scx_root);
5873 	if (unlikely(!sch))
5874 		return;
5875 
5876 	scx_dsq_insert_vtime(sch, p, dsq_id, slice, vtime, enq_flags);
5877 }
5878 
5879 __bpf_kfunc_end_defs();
5880 
5881 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
5882 BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU)
5883 BTF_ID_FLAGS(func, scx_bpf_dsq_insert___v2, KF_RCU)
5884 BTF_ID_FLAGS(func, __scx_bpf_dsq_insert_vtime, KF_RCU)
5885 BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU)
5886 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
5887 
5888 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
5889 	.owner			= THIS_MODULE,
5890 	.set			= &scx_kfunc_ids_enqueue_dispatch,
5891 };
5892 
5893 static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit,
5894 			 struct task_struct *p, u64 dsq_id, u64 enq_flags)
5895 {
5896 	struct scx_sched *sch = scx_root;
5897 	struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
5898 	struct rq *this_rq, *src_rq, *locked_rq;
5899 	bool dispatched = false;
5900 	bool in_balance;
5901 	unsigned long flags;
5902 
5903 	if (!scx_kf_allowed_if_unlocked() &&
5904 	    !scx_kf_allowed(sch, SCX_KF_DISPATCH))
5905 		return false;
5906 
5907 	/*
5908 	 * If the BPF scheduler keeps calling this function repeatedly, it can
5909 	 * cause similar live-lock conditions as consume_dispatch_q().
5910 	 */
5911 	if (unlikely(READ_ONCE(scx_aborting)))
5912 		return false;
5913 
5914 	/*
5915 	 * Can be called from either ops.dispatch() locking this_rq() or any
5916 	 * context where no rq lock is held. If latter, lock @p's task_rq which
5917 	 * we'll likely need anyway.
5918 	 */
5919 	src_rq = task_rq(p);
5920 
5921 	local_irq_save(flags);
5922 	this_rq = this_rq();
5923 	in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
5924 
5925 	if (in_balance) {
5926 		if (this_rq != src_rq) {
5927 			raw_spin_rq_unlock(this_rq);
5928 			raw_spin_rq_lock(src_rq);
5929 		}
5930 	} else {
5931 		raw_spin_rq_lock(src_rq);
5932 	}
5933 
5934 	locked_rq = src_rq;
5935 	raw_spin_lock(&src_dsq->lock);
5936 
5937 	/*
5938 	 * Did someone else get to it? @p could have already left $src_dsq, got
5939 	 * re-enqueud, or be in the process of being consumed by someone else.
5940 	 */
5941 	if (unlikely(p->scx.dsq != src_dsq ||
5942 		     u32_before(kit->cursor.priv, p->scx.dsq_seq) ||
5943 		     p->scx.holding_cpu >= 0) ||
5944 	    WARN_ON_ONCE(src_rq != task_rq(p))) {
5945 		raw_spin_unlock(&src_dsq->lock);
5946 		goto out;
5947 	}
5948 
5949 	/* @p is still on $src_dsq and stable, determine the destination */
5950 	dst_dsq = find_dsq_for_dispatch(sch, this_rq, dsq_id, p);
5951 
5952 	/*
5953 	 * Apply vtime and slice updates before moving so that the new time is
5954 	 * visible before inserting into $dst_dsq. @p is still on $src_dsq but
5955 	 * this is safe as we're locking it.
5956 	 */
5957 	if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
5958 		p->scx.dsq_vtime = kit->vtime;
5959 	if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
5960 		p->scx.slice = kit->slice;
5961 
5962 	/* execute move */
5963 	locked_rq = move_task_between_dsqs(sch, p, enq_flags, src_dsq, dst_dsq);
5964 	dispatched = true;
5965 out:
5966 	if (in_balance) {
5967 		if (this_rq != locked_rq) {
5968 			raw_spin_rq_unlock(locked_rq);
5969 			raw_spin_rq_lock(this_rq);
5970 		}
5971 	} else {
5972 		raw_spin_rq_unlock_irqrestore(locked_rq, flags);
5973 	}
5974 
5975 	kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE |
5976 			       __SCX_DSQ_ITER_HAS_VTIME);
5977 	return dispatched;
5978 }
5979 
5980 __bpf_kfunc_start_defs();
5981 
5982 /**
5983  * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
5984  *
5985  * Can only be called from ops.dispatch().
5986  */
5987 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void)
5988 {
5989 	struct scx_sched *sch;
5990 
5991 	guard(rcu)();
5992 
5993 	sch = rcu_dereference(scx_root);
5994 	if (unlikely(!sch))
5995 		return 0;
5996 
5997 	if (!scx_kf_allowed(sch, SCX_KF_DISPATCH))
5998 		return 0;
5999 
6000 	return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
6001 }
6002 
6003 /**
6004  * scx_bpf_dispatch_cancel - Cancel the latest dispatch
6005  *
6006  * Cancel the latest dispatch. Can be called multiple times to cancel further
6007  * dispatches. Can only be called from ops.dispatch().
6008  */
6009 __bpf_kfunc void scx_bpf_dispatch_cancel(void)
6010 {
6011 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6012 	struct scx_sched *sch;
6013 
6014 	guard(rcu)();
6015 
6016 	sch = rcu_dereference(scx_root);
6017 	if (unlikely(!sch))
6018 		return;
6019 
6020 	if (!scx_kf_allowed(sch, SCX_KF_DISPATCH))
6021 		return;
6022 
6023 	if (dspc->cursor > 0)
6024 		dspc->cursor--;
6025 	else
6026 		scx_error(sch, "dispatch buffer underflow");
6027 }
6028 
6029 /**
6030  * scx_bpf_dsq_move_to_local - move a task from a DSQ to the current CPU's local DSQ
6031  * @dsq_id: DSQ to move task from
6032  *
6033  * Move a task from the non-local DSQ identified by @dsq_id to the current CPU's
6034  * local DSQ for execution. Can only be called from ops.dispatch().
6035  *
6036  * This function flushes the in-flight dispatches from scx_bpf_dsq_insert()
6037  * before trying to move from the specified DSQ. It may also grab rq locks and
6038  * thus can't be called under any BPF locks.
6039  *
6040  * Returns %true if a task has been moved, %false if there isn't any task to
6041  * move.
6042  */
6043 __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id)
6044 {
6045 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6046 	struct scx_dispatch_q *dsq;
6047 	struct scx_sched *sch;
6048 
6049 	guard(rcu)();
6050 
6051 	sch = rcu_dereference(scx_root);
6052 	if (unlikely(!sch))
6053 		return false;
6054 
6055 	if (!scx_kf_allowed(sch, SCX_KF_DISPATCH))
6056 		return false;
6057 
6058 	flush_dispatch_buf(sch, dspc->rq);
6059 
6060 	dsq = find_user_dsq(sch, dsq_id);
6061 	if (unlikely(!dsq)) {
6062 		scx_error(sch, "invalid DSQ ID 0x%016llx", dsq_id);
6063 		return false;
6064 	}
6065 
6066 	if (consume_dispatch_q(sch, dspc->rq, dsq)) {
6067 		/*
6068 		 * A successfully consumed task can be dequeued before it starts
6069 		 * running while the CPU is trying to migrate other dispatched
6070 		 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
6071 		 * local DSQ.
6072 		 */
6073 		dspc->nr_tasks++;
6074 		return true;
6075 	} else {
6076 		return false;
6077 	}
6078 }
6079 
6080 /**
6081  * scx_bpf_dsq_move_set_slice - Override slice when moving between DSQs
6082  * @it__iter: DSQ iterator in progress
6083  * @slice: duration the moved task can run for in nsecs
6084  *
6085  * Override the slice of the next task that will be moved from @it__iter using
6086  * scx_bpf_dsq_move[_vtime](). If this function is not called, the previous
6087  * slice duration is kept.
6088  */
6089 __bpf_kfunc void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter,
6090 					    u64 slice)
6091 {
6092 	struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6093 
6094 	kit->slice = slice;
6095 	kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
6096 }
6097 
6098 /**
6099  * scx_bpf_dsq_move_set_vtime - Override vtime when moving between DSQs
6100  * @it__iter: DSQ iterator in progress
6101  * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
6102  *
6103  * Override the vtime of the next task that will be moved from @it__iter using
6104  * scx_bpf_dsq_move_vtime(). If this function is not called, the previous slice
6105  * vtime is kept. If scx_bpf_dsq_move() is used to dispatch the next task, the
6106  * override is ignored and cleared.
6107  */
6108 __bpf_kfunc void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter,
6109 					    u64 vtime)
6110 {
6111 	struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6112 
6113 	kit->vtime = vtime;
6114 	kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
6115 }
6116 
6117 /**
6118  * scx_bpf_dsq_move - Move a task from DSQ iteration to a DSQ
6119  * @it__iter: DSQ iterator in progress
6120  * @p: task to transfer
6121  * @dsq_id: DSQ to move @p to
6122  * @enq_flags: SCX_ENQ_*
6123  *
6124  * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ
6125  * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can
6126  * be the destination.
6127  *
6128  * For the transfer to be successful, @p must still be on the DSQ and have been
6129  * queued before the DSQ iteration started. This function doesn't care whether
6130  * @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have
6131  * been queued before the iteration started.
6132  *
6133  * @p's slice is kept by default. Use scx_bpf_dsq_move_set_slice() to update.
6134  *
6135  * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
6136  * lock (e.g. BPF timers or SYSCALL programs).
6137  *
6138  * Returns %true if @p has been consumed, %false if @p had already been
6139  * consumed, dequeued, or, for sub-scheds, @dsq_id points to a disallowed local
6140  * DSQ.
6141  */
6142 __bpf_kfunc bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter,
6143 				  struct task_struct *p, u64 dsq_id,
6144 				  u64 enq_flags)
6145 {
6146 	return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6147 			    p, dsq_id, enq_flags);
6148 }
6149 
6150 /**
6151  * scx_bpf_dsq_move_vtime - Move a task from DSQ iteration to a PRIQ DSQ
6152  * @it__iter: DSQ iterator in progress
6153  * @p: task to transfer
6154  * @dsq_id: DSQ to move @p to
6155  * @enq_flags: SCX_ENQ_*
6156  *
6157  * Transfer @p which is on the DSQ currently iterated by @it__iter to the
6158  * priority queue of the DSQ specified by @dsq_id. The destination must be a
6159  * user DSQ as only user DSQs support priority queue.
6160  *
6161  * @p's slice and vtime are kept by default. Use scx_bpf_dsq_move_set_slice()
6162  * and scx_bpf_dsq_move_set_vtime() to update.
6163  *
6164  * All other aspects are identical to scx_bpf_dsq_move(). See
6165  * scx_bpf_dsq_insert_vtime() for more information on @vtime.
6166  */
6167 __bpf_kfunc bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter,
6168 					struct task_struct *p, u64 dsq_id,
6169 					u64 enq_flags)
6170 {
6171 	return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6172 			    p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6173 }
6174 
6175 __bpf_kfunc_end_defs();
6176 
6177 BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
6178 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
6179 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
6180 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local)
6181 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU)
6182 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU)
6183 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6184 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
6185 BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
6186 
6187 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
6188 	.owner			= THIS_MODULE,
6189 	.set			= &scx_kfunc_ids_dispatch,
6190 };
6191 
6192 static u32 reenq_local(struct rq *rq)
6193 {
6194 	LIST_HEAD(tasks);
6195 	u32 nr_enqueued = 0;
6196 	struct task_struct *p, *n;
6197 
6198 	lockdep_assert_rq_held(rq);
6199 
6200 	/*
6201 	 * The BPF scheduler may choose to dispatch tasks back to
6202 	 * @rq->scx.local_dsq. Move all candidate tasks off to a private list
6203 	 * first to avoid processing the same tasks repeatedly.
6204 	 */
6205 	list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
6206 				 scx.dsq_list.node) {
6207 		/*
6208 		 * If @p is being migrated, @p's current CPU may not agree with
6209 		 * its allowed CPUs and the migration_cpu_stop is about to
6210 		 * deactivate and re-activate @p anyway. Skip re-enqueueing.
6211 		 *
6212 		 * While racing sched property changes may also dequeue and
6213 		 * re-enqueue a migrating task while its current CPU and allowed
6214 		 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to
6215 		 * the current local DSQ for running tasks and thus are not
6216 		 * visible to the BPF scheduler.
6217 		 */
6218 		if (p->migration_pending)
6219 			continue;
6220 
6221 		dispatch_dequeue(rq, p);
6222 		list_add_tail(&p->scx.dsq_list.node, &tasks);
6223 	}
6224 
6225 	list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
6226 		list_del_init(&p->scx.dsq_list.node);
6227 		do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
6228 		nr_enqueued++;
6229 	}
6230 
6231 	return nr_enqueued;
6232 }
6233 
6234 __bpf_kfunc_start_defs();
6235 
6236 /**
6237  * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
6238  *
6239  * Iterate over all of the tasks currently enqueued on the local DSQ of the
6240  * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
6241  * processed tasks. Can only be called from ops.cpu_release().
6242  *
6243  * COMPAT: Will be removed in v6.23 along with the ___v2 suffix on the void
6244  * returning variant that can be called from anywhere.
6245  */
6246 __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
6247 {
6248 	struct scx_sched *sch;
6249 	struct rq *rq;
6250 
6251 	guard(rcu)();
6252 	sch = rcu_dereference(scx_root);
6253 	if (unlikely(!sch))
6254 		return 0;
6255 
6256 	if (!scx_kf_allowed(sch, SCX_KF_CPU_RELEASE))
6257 		return 0;
6258 
6259 	rq = cpu_rq(smp_processor_id());
6260 	lockdep_assert_rq_held(rq);
6261 
6262 	return reenq_local(rq);
6263 }
6264 
6265 __bpf_kfunc_end_defs();
6266 
6267 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
6268 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local)
6269 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
6270 
6271 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
6272 	.owner			= THIS_MODULE,
6273 	.set			= &scx_kfunc_ids_cpu_release,
6274 };
6275 
6276 __bpf_kfunc_start_defs();
6277 
6278 /**
6279  * scx_bpf_create_dsq - Create a custom DSQ
6280  * @dsq_id: DSQ to create
6281  * @node: NUMA node to allocate from
6282  *
6283  * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable
6284  * scx callback, and any BPF_PROG_TYPE_SYSCALL prog.
6285  */
6286 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
6287 {
6288 	struct scx_dispatch_q *dsq;
6289 	struct scx_sched *sch;
6290 	s32 ret;
6291 
6292 	if (unlikely(node >= (int)nr_node_ids ||
6293 		     (node < 0 && node != NUMA_NO_NODE)))
6294 		return -EINVAL;
6295 
6296 	if (unlikely(dsq_id & SCX_DSQ_FLAG_BUILTIN))
6297 		return -EINVAL;
6298 
6299 	dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
6300 	if (!dsq)
6301 		return -ENOMEM;
6302 
6303 	init_dsq(dsq, dsq_id);
6304 
6305 	rcu_read_lock();
6306 
6307 	sch = rcu_dereference(scx_root);
6308 	if (sch)
6309 		ret = rhashtable_lookup_insert_fast(&sch->dsq_hash, &dsq->hash_node,
6310 						    dsq_hash_params);
6311 	else
6312 		ret = -ENODEV;
6313 
6314 	rcu_read_unlock();
6315 	if (ret)
6316 		kfree(dsq);
6317 	return ret;
6318 }
6319 
6320 __bpf_kfunc_end_defs();
6321 
6322 BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
6323 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
6324 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU)
6325 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU)
6326 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6327 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
6328 BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
6329 
6330 static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = {
6331 	.owner			= THIS_MODULE,
6332 	.set			= &scx_kfunc_ids_unlocked,
6333 };
6334 
6335 __bpf_kfunc_start_defs();
6336 
6337 /**
6338  * scx_bpf_task_set_slice - Set task's time slice
6339  * @p: task of interest
6340  * @slice: time slice to set in nsecs
6341  *
6342  * Set @p's time slice to @slice. Returns %true on success, %false if the
6343  * calling scheduler doesn't have authority over @p.
6344  */
6345 __bpf_kfunc bool scx_bpf_task_set_slice(struct task_struct *p, u64 slice)
6346 {
6347 	p->scx.slice = slice;
6348 	return true;
6349 }
6350 
6351 /**
6352  * scx_bpf_task_set_dsq_vtime - Set task's virtual time for DSQ ordering
6353  * @p: task of interest
6354  * @vtime: virtual time to set
6355  *
6356  * Set @p's virtual time to @vtime. Returns %true on success, %false if the
6357  * calling scheduler doesn't have authority over @p.
6358  */
6359 __bpf_kfunc bool scx_bpf_task_set_dsq_vtime(struct task_struct *p, u64 vtime)
6360 {
6361 	p->scx.dsq_vtime = vtime;
6362 	return true;
6363 }
6364 
6365 static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags)
6366 {
6367 	struct rq *this_rq;
6368 	unsigned long irq_flags;
6369 
6370 	if (!ops_cpu_valid(sch, cpu, NULL))
6371 		return;
6372 
6373 	local_irq_save(irq_flags);
6374 
6375 	this_rq = this_rq();
6376 
6377 	/*
6378 	 * While bypassing for PM ops, IRQ handling may not be online which can
6379 	 * lead to irq_work_queue() malfunction such as infinite busy wait for
6380 	 * IRQ status update. Suppress kicking.
6381 	 */
6382 	if (scx_rq_bypassing(this_rq))
6383 		goto out;
6384 
6385 	/*
6386 	 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
6387 	 * rq locks. We can probably be smarter and avoid bouncing if called
6388 	 * from ops which don't hold a rq lock.
6389 	 */
6390 	if (flags & SCX_KICK_IDLE) {
6391 		struct rq *target_rq = cpu_rq(cpu);
6392 
6393 		if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT)))
6394 			scx_error(sch, "PREEMPT/WAIT cannot be used with SCX_KICK_IDLE");
6395 
6396 		if (raw_spin_rq_trylock(target_rq)) {
6397 			if (can_skip_idle_kick(target_rq)) {
6398 				raw_spin_rq_unlock(target_rq);
6399 				goto out;
6400 			}
6401 			raw_spin_rq_unlock(target_rq);
6402 		}
6403 		cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
6404 	} else {
6405 		cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
6406 
6407 		if (flags & SCX_KICK_PREEMPT)
6408 			cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
6409 		if (flags & SCX_KICK_WAIT)
6410 			cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
6411 	}
6412 
6413 	irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
6414 out:
6415 	local_irq_restore(irq_flags);
6416 }
6417 
6418 /**
6419  * scx_bpf_kick_cpu - Trigger reschedule on a CPU
6420  * @cpu: cpu to kick
6421  * @flags: %SCX_KICK_* flags
6422  *
6423  * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
6424  * trigger rescheduling on a busy CPU. This can be called from any online
6425  * scx_ops operation and the actual kicking is performed asynchronously through
6426  * an irq work.
6427  */
6428 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
6429 {
6430 	struct scx_sched *sch;
6431 
6432 	guard(rcu)();
6433 	sch = rcu_dereference(scx_root);
6434 	if (likely(sch))
6435 		scx_kick_cpu(sch, cpu, flags);
6436 }
6437 
6438 /**
6439  * scx_bpf_dsq_nr_queued - Return the number of queued tasks
6440  * @dsq_id: id of the DSQ
6441  *
6442  * Return the number of tasks in the DSQ matching @dsq_id. If not found,
6443  * -%ENOENT is returned.
6444  */
6445 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id)
6446 {
6447 	struct scx_sched *sch;
6448 	struct scx_dispatch_q *dsq;
6449 	s32 ret;
6450 
6451 	preempt_disable();
6452 
6453 	sch = rcu_dereference_sched(scx_root);
6454 	if (unlikely(!sch)) {
6455 		ret = -ENODEV;
6456 		goto out;
6457 	}
6458 
6459 	if (dsq_id == SCX_DSQ_LOCAL) {
6460 		ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
6461 		goto out;
6462 	} else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
6463 		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
6464 
6465 		if (ops_cpu_valid(sch, cpu, NULL)) {
6466 			ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
6467 			goto out;
6468 		}
6469 	} else {
6470 		dsq = find_user_dsq(sch, dsq_id);
6471 		if (dsq) {
6472 			ret = READ_ONCE(dsq->nr);
6473 			goto out;
6474 		}
6475 	}
6476 	ret = -ENOENT;
6477 out:
6478 	preempt_enable();
6479 	return ret;
6480 }
6481 
6482 /**
6483  * scx_bpf_destroy_dsq - Destroy a custom DSQ
6484  * @dsq_id: DSQ to destroy
6485  *
6486  * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
6487  * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
6488  * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
6489  * which doesn't exist. Can be called from any online scx_ops operations.
6490  */
6491 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id)
6492 {
6493 	struct scx_sched *sch;
6494 
6495 	rcu_read_lock();
6496 	sch = rcu_dereference(scx_root);
6497 	if (sch)
6498 		destroy_dsq(sch, dsq_id);
6499 	rcu_read_unlock();
6500 }
6501 
6502 /**
6503  * bpf_iter_scx_dsq_new - Create a DSQ iterator
6504  * @it: iterator to initialize
6505  * @dsq_id: DSQ to iterate
6506  * @flags: %SCX_DSQ_ITER_*
6507  *
6508  * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
6509  * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
6510  * tasks which are already queued when this function is invoked.
6511  */
6512 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
6513 				     u64 flags)
6514 {
6515 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6516 	struct scx_sched *sch;
6517 
6518 	BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
6519 		     sizeof(struct bpf_iter_scx_dsq));
6520 	BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
6521 		     __alignof__(struct bpf_iter_scx_dsq));
6522 	BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
6523 		     ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
6524 
6525 	/*
6526 	 * next() and destroy() will be called regardless of the return value.
6527 	 * Always clear $kit->dsq.
6528 	 */
6529 	kit->dsq = NULL;
6530 
6531 	sch = rcu_dereference_check(scx_root, rcu_read_lock_bh_held());
6532 	if (unlikely(!sch))
6533 		return -ENODEV;
6534 
6535 	if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
6536 		return -EINVAL;
6537 
6538 	kit->dsq = find_user_dsq(sch, dsq_id);
6539 	if (!kit->dsq)
6540 		return -ENOENT;
6541 
6542 	kit->cursor = INIT_DSQ_LIST_CURSOR(kit->cursor, flags,
6543 					   READ_ONCE(kit->dsq->seq));
6544 
6545 	return 0;
6546 }
6547 
6548 /**
6549  * bpf_iter_scx_dsq_next - Progress a DSQ iterator
6550  * @it: iterator to progress
6551  *
6552  * Return the next task. See bpf_iter_scx_dsq_new().
6553  */
6554 __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
6555 {
6556 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6557 	bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV;
6558 	struct task_struct *p;
6559 	unsigned long flags;
6560 
6561 	if (!kit->dsq)
6562 		return NULL;
6563 
6564 	raw_spin_lock_irqsave(&kit->dsq->lock, flags);
6565 
6566 	if (list_empty(&kit->cursor.node))
6567 		p = NULL;
6568 	else
6569 		p = container_of(&kit->cursor, struct task_struct, scx.dsq_list);
6570 
6571 	/*
6572 	 * Only tasks which were queued before the iteration started are
6573 	 * visible. This bounds BPF iterations and guarantees that vtime never
6574 	 * jumps in the other direction while iterating.
6575 	 */
6576 	do {
6577 		p = nldsq_next_task(kit->dsq, p, rev);
6578 	} while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq)));
6579 
6580 	if (p) {
6581 		if (rev)
6582 			list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node);
6583 		else
6584 			list_move(&kit->cursor.node, &p->scx.dsq_list.node);
6585 	} else {
6586 		list_del_init(&kit->cursor.node);
6587 	}
6588 
6589 	raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
6590 
6591 	return p;
6592 }
6593 
6594 /**
6595  * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
6596  * @it: iterator to destroy
6597  *
6598  * Undo scx_iter_scx_dsq_new().
6599  */
6600 __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
6601 {
6602 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6603 
6604 	if (!kit->dsq)
6605 		return;
6606 
6607 	if (!list_empty(&kit->cursor.node)) {
6608 		unsigned long flags;
6609 
6610 		raw_spin_lock_irqsave(&kit->dsq->lock, flags);
6611 		list_del_init(&kit->cursor.node);
6612 		raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
6613 	}
6614 	kit->dsq = NULL;
6615 }
6616 
6617 /**
6618  * scx_bpf_dsq_peek - Lockless peek at the first element.
6619  * @dsq_id: DSQ to examine.
6620  *
6621  * Read the first element in the DSQ. This is semantically equivalent to using
6622  * the DSQ iterator, but is lockfree. Of course, like any lockless operation,
6623  * this provides only a point-in-time snapshot, and the contents may change
6624  * by the time any subsequent locking operation reads the queue.
6625  *
6626  * Returns the pointer, or NULL indicates an empty queue OR internal error.
6627  */
6628 __bpf_kfunc struct task_struct *scx_bpf_dsq_peek(u64 dsq_id)
6629 {
6630 	struct scx_sched *sch;
6631 	struct scx_dispatch_q *dsq;
6632 
6633 	sch = rcu_dereference(scx_root);
6634 	if (unlikely(!sch))
6635 		return NULL;
6636 
6637 	if (unlikely(dsq_id & SCX_DSQ_FLAG_BUILTIN)) {
6638 		scx_error(sch, "peek disallowed on builtin DSQ 0x%llx", dsq_id);
6639 		return NULL;
6640 	}
6641 
6642 	dsq = find_user_dsq(sch, dsq_id);
6643 	if (unlikely(!dsq)) {
6644 		scx_error(sch, "peek on non-existent DSQ 0x%llx", dsq_id);
6645 		return NULL;
6646 	}
6647 
6648 	return rcu_dereference(dsq->first_task);
6649 }
6650 
6651 __bpf_kfunc_end_defs();
6652 
6653 static s32 __bstr_format(struct scx_sched *sch, u64 *data_buf, char *line_buf,
6654 			 size_t line_size, char *fmt, unsigned long long *data,
6655 			 u32 data__sz)
6656 {
6657 	struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
6658 	s32 ret;
6659 
6660 	if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
6661 	    (data__sz && !data)) {
6662 		scx_error(sch, "invalid data=%p and data__sz=%u", (void *)data, data__sz);
6663 		return -EINVAL;
6664 	}
6665 
6666 	ret = copy_from_kernel_nofault(data_buf, data, data__sz);
6667 	if (ret < 0) {
6668 		scx_error(sch, "failed to read data fields (%d)", ret);
6669 		return ret;
6670 	}
6671 
6672 	ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8,
6673 				  &bprintf_data);
6674 	if (ret < 0) {
6675 		scx_error(sch, "format preparation failed (%d)", ret);
6676 		return ret;
6677 	}
6678 
6679 	ret = bstr_printf(line_buf, line_size, fmt,
6680 			  bprintf_data.bin_args);
6681 	bpf_bprintf_cleanup(&bprintf_data);
6682 	if (ret < 0) {
6683 		scx_error(sch, "(\"%s\", %p, %u) failed to format", fmt, data, data__sz);
6684 		return ret;
6685 	}
6686 
6687 	return ret;
6688 }
6689 
6690 static s32 bstr_format(struct scx_sched *sch, struct scx_bstr_buf *buf,
6691 		       char *fmt, unsigned long long *data, u32 data__sz)
6692 {
6693 	return __bstr_format(sch, buf->data, buf->line, sizeof(buf->line),
6694 			     fmt, data, data__sz);
6695 }
6696 
6697 __bpf_kfunc_start_defs();
6698 
6699 /**
6700  * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
6701  * @exit_code: Exit value to pass to user space via struct scx_exit_info.
6702  * @fmt: error message format string
6703  * @data: format string parameters packaged using ___bpf_fill() macro
6704  * @data__sz: @data len, must end in '__sz' for the verifier
6705  *
6706  * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops
6707  * disabling.
6708  */
6709 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
6710 				   unsigned long long *data, u32 data__sz)
6711 {
6712 	struct scx_sched *sch;
6713 	unsigned long flags;
6714 
6715 	raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
6716 	sch = rcu_dereference_bh(scx_root);
6717 	if (likely(sch) &&
6718 	    bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
6719 		scx_exit(sch, SCX_EXIT_UNREG_BPF, exit_code, "%s", scx_exit_bstr_buf.line);
6720 	raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
6721 }
6722 
6723 /**
6724  * scx_bpf_error_bstr - Indicate fatal error
6725  * @fmt: error message format string
6726  * @data: format string parameters packaged using ___bpf_fill() macro
6727  * @data__sz: @data len, must end in '__sz' for the verifier
6728  *
6729  * Indicate that the BPF scheduler encountered a fatal error and initiate ops
6730  * disabling.
6731  */
6732 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
6733 				    u32 data__sz)
6734 {
6735 	struct scx_sched *sch;
6736 	unsigned long flags;
6737 
6738 	raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
6739 	sch = rcu_dereference_bh(scx_root);
6740 	if (likely(sch) &&
6741 	    bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
6742 		scx_exit(sch, SCX_EXIT_ERROR_BPF, 0, "%s", scx_exit_bstr_buf.line);
6743 	raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
6744 }
6745 
6746 /**
6747  * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler
6748  * @fmt: format string
6749  * @data: format string parameters packaged using ___bpf_fill() macro
6750  * @data__sz: @data len, must end in '__sz' for the verifier
6751  *
6752  * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and
6753  * dump_task() to generate extra debug dump specific to the BPF scheduler.
6754  *
6755  * The extra dump may be multiple lines. A single line may be split over
6756  * multiple calls. The last line is automatically terminated.
6757  */
6758 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data,
6759 				   u32 data__sz)
6760 {
6761 	struct scx_sched *sch;
6762 	struct scx_dump_data *dd = &scx_dump_data;
6763 	struct scx_bstr_buf *buf = &dd->buf;
6764 	s32 ret;
6765 
6766 	guard(rcu)();
6767 
6768 	sch = rcu_dereference(scx_root);
6769 	if (unlikely(!sch))
6770 		return;
6771 
6772 	if (raw_smp_processor_id() != dd->cpu) {
6773 		scx_error(sch, "scx_bpf_dump() must only be called from ops.dump() and friends");
6774 		return;
6775 	}
6776 
6777 	/* append the formatted string to the line buf */
6778 	ret = __bstr_format(sch, buf->data, buf->line + dd->cursor,
6779 			    sizeof(buf->line) - dd->cursor, fmt, data, data__sz);
6780 	if (ret < 0) {
6781 		dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)",
6782 			  dd->prefix, fmt, data, data__sz, ret);
6783 		return;
6784 	}
6785 
6786 	dd->cursor += ret;
6787 	dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line));
6788 
6789 	if (!dd->cursor)
6790 		return;
6791 
6792 	/*
6793 	 * If the line buf overflowed or ends in a newline, flush it into the
6794 	 * dump. This is to allow the caller to generate a single line over
6795 	 * multiple calls. As ops_dump_flush() can also handle multiple lines in
6796 	 * the line buf, the only case which can lead to an unexpected
6797 	 * truncation is when the caller keeps generating newlines in the middle
6798 	 * instead of the end consecutively. Don't do that.
6799 	 */
6800 	if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n')
6801 		ops_dump_flush();
6802 }
6803 
6804 /**
6805  * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
6806  *
6807  * Iterate over all of the tasks currently enqueued on the local DSQ of the
6808  * caller's CPU, and re-enqueue them in the BPF scheduler. Can be called from
6809  * anywhere.
6810  */
6811 __bpf_kfunc void scx_bpf_reenqueue_local___v2(void)
6812 {
6813 	struct rq *rq;
6814 
6815 	guard(preempt)();
6816 
6817 	rq = this_rq();
6818 	local_set(&rq->scx.reenq_local_deferred, 1);
6819 	schedule_deferred(rq);
6820 }
6821 
6822 /**
6823  * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU
6824  * @cpu: CPU of interest
6825  *
6826  * Return the maximum relative capacity of @cpu in relation to the most
6827  * performant CPU in the system. The return value is in the range [1,
6828  * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur().
6829  */
6830 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu)
6831 {
6832 	struct scx_sched *sch;
6833 
6834 	guard(rcu)();
6835 
6836 	sch = rcu_dereference(scx_root);
6837 	if (likely(sch) && ops_cpu_valid(sch, cpu, NULL))
6838 		return arch_scale_cpu_capacity(cpu);
6839 	else
6840 		return SCX_CPUPERF_ONE;
6841 }
6842 
6843 /**
6844  * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU
6845  * @cpu: CPU of interest
6846  *
6847  * Return the current relative performance of @cpu in relation to its maximum.
6848  * The return value is in the range [1, %SCX_CPUPERF_ONE].
6849  *
6850  * The current performance level of a CPU in relation to the maximum performance
6851  * available in the system can be calculated as follows:
6852  *
6853  *   scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE
6854  *
6855  * The result is in the range [1, %SCX_CPUPERF_ONE].
6856  */
6857 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu)
6858 {
6859 	struct scx_sched *sch;
6860 
6861 	guard(rcu)();
6862 
6863 	sch = rcu_dereference(scx_root);
6864 	if (likely(sch) && ops_cpu_valid(sch, cpu, NULL))
6865 		return arch_scale_freq_capacity(cpu);
6866 	else
6867 		return SCX_CPUPERF_ONE;
6868 }
6869 
6870 /**
6871  * scx_bpf_cpuperf_set - Set the relative performance target of a CPU
6872  * @cpu: CPU of interest
6873  * @perf: target performance level [0, %SCX_CPUPERF_ONE]
6874  *
6875  * Set the target performance level of @cpu to @perf. @perf is in linear
6876  * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
6877  * schedutil cpufreq governor chooses the target frequency.
6878  *
6879  * The actual performance level chosen, CPU grouping, and the overhead and
6880  * latency of the operations are dependent on the hardware and cpufreq driver in
6881  * use. Consult hardware and cpufreq documentation for more information. The
6882  * current performance level can be monitored using scx_bpf_cpuperf_cur().
6883  */
6884 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
6885 {
6886 	struct scx_sched *sch;
6887 
6888 	guard(rcu)();
6889 
6890 	sch = rcu_dereference(scx_root);
6891 	if (unlikely(!sch))
6892 		return;
6893 
6894 	if (unlikely(perf > SCX_CPUPERF_ONE)) {
6895 		scx_error(sch, "Invalid cpuperf target %u for CPU %d", perf, cpu);
6896 		return;
6897 	}
6898 
6899 	if (ops_cpu_valid(sch, cpu, NULL)) {
6900 		struct rq *rq = cpu_rq(cpu), *locked_rq = scx_locked_rq();
6901 		struct rq_flags rf;
6902 
6903 		/*
6904 		 * When called with an rq lock held, restrict the operation
6905 		 * to the corresponding CPU to prevent ABBA deadlocks.
6906 		 */
6907 		if (locked_rq && rq != locked_rq) {
6908 			scx_error(sch, "Invalid target CPU %d", cpu);
6909 			return;
6910 		}
6911 
6912 		/*
6913 		 * If no rq lock is held, allow to operate on any CPU by
6914 		 * acquiring the corresponding rq lock.
6915 		 */
6916 		if (!locked_rq) {
6917 			rq_lock_irqsave(rq, &rf);
6918 			update_rq_clock(rq);
6919 		}
6920 
6921 		rq->scx.cpuperf_target = perf;
6922 		cpufreq_update_util(rq, 0);
6923 
6924 		if (!locked_rq)
6925 			rq_unlock_irqrestore(rq, &rf);
6926 	}
6927 }
6928 
6929 /**
6930  * scx_bpf_nr_node_ids - Return the number of possible node IDs
6931  *
6932  * All valid node IDs in the system are smaller than the returned value.
6933  */
6934 __bpf_kfunc u32 scx_bpf_nr_node_ids(void)
6935 {
6936 	return nr_node_ids;
6937 }
6938 
6939 /**
6940  * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
6941  *
6942  * All valid CPU IDs in the system are smaller than the returned value.
6943  */
6944 __bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
6945 {
6946 	return nr_cpu_ids;
6947 }
6948 
6949 /**
6950  * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
6951  */
6952 __bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
6953 {
6954 	return cpu_possible_mask;
6955 }
6956 
6957 /**
6958  * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
6959  */
6960 __bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
6961 {
6962 	return cpu_online_mask;
6963 }
6964 
6965 /**
6966  * scx_bpf_put_cpumask - Release a possible/online cpumask
6967  * @cpumask: cpumask to release
6968  */
6969 __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
6970 {
6971 	/*
6972 	 * Empty function body because we aren't actually acquiring or releasing
6973 	 * a reference to a global cpumask, which is read-only in the caller and
6974 	 * is never released. The acquire / release semantics here are just used
6975 	 * to make the cpumask is a trusted pointer in the caller.
6976 	 */
6977 }
6978 
6979 /**
6980  * scx_bpf_task_running - Is task currently running?
6981  * @p: task of interest
6982  */
6983 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
6984 {
6985 	return task_rq(p)->curr == p;
6986 }
6987 
6988 /**
6989  * scx_bpf_task_cpu - CPU a task is currently associated with
6990  * @p: task of interest
6991  */
6992 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
6993 {
6994 	return task_cpu(p);
6995 }
6996 
6997 /**
6998  * scx_bpf_cpu_rq - Fetch the rq of a CPU
6999  * @cpu: CPU of the rq
7000  */
7001 __bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
7002 {
7003 	struct scx_sched *sch;
7004 
7005 	guard(rcu)();
7006 
7007 	sch = rcu_dereference(scx_root);
7008 	if (unlikely(!sch))
7009 		return NULL;
7010 
7011 	if (!ops_cpu_valid(sch, cpu, NULL))
7012 		return NULL;
7013 
7014 	if (!sch->warned_deprecated_rq) {
7015 		printk_deferred(KERN_WARNING "sched_ext: %s() is deprecated; "
7016 				"use scx_bpf_locked_rq() when holding rq lock "
7017 				"or scx_bpf_cpu_curr() to read remote curr safely.\n", __func__);
7018 		sch->warned_deprecated_rq = true;
7019 	}
7020 
7021 	return cpu_rq(cpu);
7022 }
7023 
7024 /**
7025  * scx_bpf_locked_rq - Return the rq currently locked by SCX
7026  *
7027  * Returns the rq if a rq lock is currently held by SCX.
7028  * Otherwise emits an error and returns NULL.
7029  */
7030 __bpf_kfunc struct rq *scx_bpf_locked_rq(void)
7031 {
7032 	struct scx_sched *sch;
7033 	struct rq *rq;
7034 
7035 	guard(preempt)();
7036 
7037 	sch = rcu_dereference_sched(scx_root);
7038 	if (unlikely(!sch))
7039 		return NULL;
7040 
7041 	rq = scx_locked_rq();
7042 	if (!rq) {
7043 		scx_error(sch, "accessing rq without holding rq lock");
7044 		return NULL;
7045 	}
7046 
7047 	return rq;
7048 }
7049 
7050 /**
7051  * scx_bpf_cpu_curr - Return remote CPU's curr task
7052  * @cpu: CPU of interest
7053  *
7054  * Callers must hold RCU read lock (KF_RCU).
7055  */
7056 __bpf_kfunc struct task_struct *scx_bpf_cpu_curr(s32 cpu)
7057 {
7058 	struct scx_sched *sch;
7059 
7060 	guard(rcu)();
7061 
7062 	sch = rcu_dereference(scx_root);
7063 	if (unlikely(!sch))
7064 		return NULL;
7065 
7066 	if (!ops_cpu_valid(sch, cpu, NULL))
7067 		return NULL;
7068 
7069 	return rcu_dereference(cpu_rq(cpu)->curr);
7070 }
7071 
7072 /**
7073  * scx_bpf_task_cgroup - Return the sched cgroup of a task
7074  * @p: task of interest
7075  *
7076  * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
7077  * from the scheduler's POV. SCX operations should use this function to
7078  * determine @p's current cgroup as, unlike following @p->cgroups,
7079  * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all
7080  * rq-locked operations. Can be called on the parameter tasks of rq-locked
7081  * operations. The restriction guarantees that @p's rq is locked by the caller.
7082  */
7083 #ifdef CONFIG_CGROUP_SCHED
7084 __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
7085 {
7086 	struct task_group *tg = p->sched_task_group;
7087 	struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
7088 	struct scx_sched *sch;
7089 
7090 	guard(rcu)();
7091 
7092 	sch = rcu_dereference(scx_root);
7093 	if (unlikely(!sch))
7094 		goto out;
7095 
7096 	if (!scx_kf_allowed_on_arg_tasks(sch, __SCX_KF_RQ_LOCKED, p))
7097 		goto out;
7098 
7099 	cgrp = tg_cgrp(tg);
7100 
7101 out:
7102 	cgroup_get(cgrp);
7103 	return cgrp;
7104 }
7105 #endif
7106 
7107 /**
7108  * scx_bpf_now - Returns a high-performance monotonically non-decreasing
7109  * clock for the current CPU. The clock returned is in nanoseconds.
7110  *
7111  * It provides the following properties:
7112  *
7113  * 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently
7114  *  to account for execution time and track tasks' runtime properties.
7115  *  Unfortunately, in some hardware platforms, bpf_ktime_get_ns() -- which
7116  *  eventually reads a hardware timestamp counter -- is neither performant nor
7117  *  scalable. scx_bpf_now() aims to provide a high-performance clock by
7118  *  using the rq clock in the scheduler core whenever possible.
7119  *
7120  * 2) High enough resolution for the BPF scheduler use cases: In most BPF
7121  *  scheduler use cases, the required clock resolution is lower than the most
7122  *  accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically
7123  *  uses the rq clock in the scheduler core whenever it is valid. It considers
7124  *  that the rq clock is valid from the time the rq clock is updated
7125  *  (update_rq_clock) until the rq is unlocked (rq_unpin_lock).
7126  *
7127  * 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now()
7128  *  guarantees the clock never goes backward when comparing them in the same
7129  *  CPU. On the other hand, when comparing clocks in different CPUs, there
7130  *  is no such guarantee -- the clock can go backward. It provides a
7131  *  monotonically *non-decreasing* clock so that it would provide the same
7132  *  clock values in two different scx_bpf_now() calls in the same CPU
7133  *  during the same period of when the rq clock is valid.
7134  */
7135 __bpf_kfunc u64 scx_bpf_now(void)
7136 {
7137 	struct rq *rq;
7138 	u64 clock;
7139 
7140 	preempt_disable();
7141 
7142 	rq = this_rq();
7143 	if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) {
7144 		/*
7145 		 * If the rq clock is valid, use the cached rq clock.
7146 		 *
7147 		 * Note that scx_bpf_now() is re-entrant between a process
7148 		 * context and an interrupt context (e.g., timer interrupt).
7149 		 * However, we don't need to consider the race between them
7150 		 * because such race is not observable from a caller.
7151 		 */
7152 		clock = READ_ONCE(rq->scx.clock);
7153 	} else {
7154 		/*
7155 		 * Otherwise, return a fresh rq clock.
7156 		 *
7157 		 * The rq clock is updated outside of the rq lock.
7158 		 * In this case, keep the updated rq clock invalid so the next
7159 		 * kfunc call outside the rq lock gets a fresh rq clock.
7160 		 */
7161 		clock = sched_clock_cpu(cpu_of(rq));
7162 	}
7163 
7164 	preempt_enable();
7165 
7166 	return clock;
7167 }
7168 
7169 static void scx_read_events(struct scx_sched *sch, struct scx_event_stats *events)
7170 {
7171 	struct scx_event_stats *e_cpu;
7172 	int cpu;
7173 
7174 	/* Aggregate per-CPU event counters into @events. */
7175 	memset(events, 0, sizeof(*events));
7176 	for_each_possible_cpu(cpu) {
7177 		e_cpu = &per_cpu_ptr(sch->pcpu, cpu)->event_stats;
7178 		scx_agg_event(events, e_cpu, SCX_EV_SELECT_CPU_FALLBACK);
7179 		scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
7180 		scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_KEEP_LAST);
7181 		scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_EXITING);
7182 		scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
7183 		scx_agg_event(events, e_cpu, SCX_EV_REFILL_SLICE_DFL);
7184 		scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DURATION);
7185 		scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DISPATCH);
7186 		scx_agg_event(events, e_cpu, SCX_EV_BYPASS_ACTIVATE);
7187 	}
7188 }
7189 
7190 /*
7191  * scx_bpf_events - Get a system-wide event counter to
7192  * @events: output buffer from a BPF program
7193  * @events__sz: @events len, must end in '__sz'' for the verifier
7194  */
7195 __bpf_kfunc void scx_bpf_events(struct scx_event_stats *events,
7196 				size_t events__sz)
7197 {
7198 	struct scx_sched *sch;
7199 	struct scx_event_stats e_sys;
7200 
7201 	rcu_read_lock();
7202 	sch = rcu_dereference(scx_root);
7203 	if (sch)
7204 		scx_read_events(sch, &e_sys);
7205 	else
7206 		memset(&e_sys, 0, sizeof(e_sys));
7207 	rcu_read_unlock();
7208 
7209 	/*
7210 	 * We cannot entirely trust a BPF-provided size since a BPF program
7211 	 * might be compiled against a different vmlinux.h, of which
7212 	 * scx_event_stats would be larger (a newer vmlinux.h) or smaller
7213 	 * (an older vmlinux.h). Hence, we use the smaller size to avoid
7214 	 * memory corruption.
7215 	 */
7216 	events__sz = min(events__sz, sizeof(*events));
7217 	memcpy(events, &e_sys, events__sz);
7218 }
7219 
7220 __bpf_kfunc_end_defs();
7221 
7222 BTF_KFUNCS_START(scx_kfunc_ids_any)
7223 BTF_ID_FLAGS(func, scx_bpf_task_set_slice, KF_RCU);
7224 BTF_ID_FLAGS(func, scx_bpf_task_set_dsq_vtime, KF_RCU);
7225 BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
7226 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
7227 BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
7228 BTF_ID_FLAGS(func, scx_bpf_dsq_peek, KF_RCU_PROTECTED | KF_RET_NULL)
7229 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
7230 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
7231 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
7232 BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
7233 BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
7234 BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS)
7235 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local___v2)
7236 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
7237 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
7238 BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
7239 BTF_ID_FLAGS(func, scx_bpf_nr_node_ids)
7240 BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
7241 BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
7242 BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
7243 BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
7244 BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
7245 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
7246 BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
7247 BTF_ID_FLAGS(func, scx_bpf_locked_rq, KF_RET_NULL)
7248 BTF_ID_FLAGS(func, scx_bpf_cpu_curr, KF_RET_NULL | KF_RCU_PROTECTED)
7249 #ifdef CONFIG_CGROUP_SCHED
7250 BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
7251 #endif
7252 BTF_ID_FLAGS(func, scx_bpf_now)
7253 BTF_ID_FLAGS(func, scx_bpf_events, KF_TRUSTED_ARGS)
7254 BTF_KFUNCS_END(scx_kfunc_ids_any)
7255 
7256 static const struct btf_kfunc_id_set scx_kfunc_set_any = {
7257 	.owner			= THIS_MODULE,
7258 	.set			= &scx_kfunc_ids_any,
7259 };
7260 
7261 static int __init scx_init(void)
7262 {
7263 	int ret;
7264 
7265 	/*
7266 	 * kfunc registration can't be done from init_sched_ext_class() as
7267 	 * register_btf_kfunc_id_set() needs most of the system to be up.
7268 	 *
7269 	 * Some kfuncs are context-sensitive and can only be called from
7270 	 * specific SCX ops. They are grouped into BTF sets accordingly.
7271 	 * Unfortunately, BPF currently doesn't have a way of enforcing such
7272 	 * restrictions. Eventually, the verifier should be able to enforce
7273 	 * them. For now, register them the same and make each kfunc explicitly
7274 	 * check using scx_kf_allowed().
7275 	 */
7276 	if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7277 					     &scx_kfunc_set_enqueue_dispatch)) ||
7278 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7279 					     &scx_kfunc_set_dispatch)) ||
7280 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7281 					     &scx_kfunc_set_cpu_release)) ||
7282 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7283 					     &scx_kfunc_set_unlocked)) ||
7284 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7285 					     &scx_kfunc_set_unlocked)) ||
7286 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7287 					     &scx_kfunc_set_any)) ||
7288 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
7289 					     &scx_kfunc_set_any)) ||
7290 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7291 					     &scx_kfunc_set_any))) {
7292 		pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
7293 		return ret;
7294 	}
7295 
7296 	ret = scx_idle_init();
7297 	if (ret) {
7298 		pr_err("sched_ext: Failed to initialize idle tracking (%d)\n", ret);
7299 		return ret;
7300 	}
7301 
7302 	ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
7303 	if (ret) {
7304 		pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
7305 		return ret;
7306 	}
7307 
7308 	ret = register_pm_notifier(&scx_pm_notifier);
7309 	if (ret) {
7310 		pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
7311 		return ret;
7312 	}
7313 
7314 	scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
7315 	if (!scx_kset) {
7316 		pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
7317 		return -ENOMEM;
7318 	}
7319 
7320 	ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
7321 	if (ret < 0) {
7322 		pr_err("sched_ext: Failed to add global attributes\n");
7323 		return ret;
7324 	}
7325 
7326 	if (!alloc_cpumask_var(&scx_bypass_lb_donee_cpumask, GFP_KERNEL) ||
7327 	    !alloc_cpumask_var(&scx_bypass_lb_resched_cpumask, GFP_KERNEL)) {
7328 		pr_err("sched_ext: Failed to allocate cpumasks\n");
7329 		return -ENOMEM;
7330 	}
7331 
7332 	return 0;
7333 }
7334 __initcall(scx_init);
7335