xref: /linux/kernel/sched/ext.c (revision a23cd25baed2316e50597f8b67192bdc904f955b)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4  *
5  * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
6  * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
7  * Copyright (c) 2022 David Vernet <dvernet@meta.com>
8  */
9 #include <linux/btf_ids.h>
10 #include "ext_idle.h"
11 
12 /*
13  * NOTE: sched_ext is in the process of growing multiple scheduler support and
14  * scx_root usage is in a transitional state. Naked dereferences are safe if the
15  * caller is one of the tasks attached to SCX and explicit RCU dereference is
16  * necessary otherwise. Naked scx_root dereferences trigger sparse warnings but
17  * are used as temporary markers to indicate that the dereferences need to be
18  * updated to point to the associated scheduler instances rather than scx_root.
19  */
20 static struct scx_sched __rcu *scx_root;
21 
22 /*
23  * During exit, a task may schedule after losing its PIDs. When disabling the
24  * BPF scheduler, we need to be able to iterate tasks in every state to
25  * guarantee system safety. Maintain a dedicated task list which contains every
26  * task between its fork and eventual free.
27  */
28 static DEFINE_SPINLOCK(scx_tasks_lock);
29 static LIST_HEAD(scx_tasks);
30 
31 /* ops enable/disable */
32 static DEFINE_MUTEX(scx_enable_mutex);
33 DEFINE_STATIC_KEY_FALSE(__scx_enabled);
34 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
35 static atomic_t scx_enable_state_var = ATOMIC_INIT(SCX_DISABLED);
36 static unsigned long scx_in_softlockup;
37 static atomic_t scx_breather_depth = ATOMIC_INIT(0);
38 static int scx_bypass_depth;
39 static bool scx_init_task_enabled;
40 static bool scx_switching_all;
41 DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
42 
43 static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
44 static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
45 
46 /*
47  * A monotically increasing sequence number that is incremented every time a
48  * scheduler is enabled. This can be used by to check if any custom sched_ext
49  * scheduler has ever been used in the system.
50  */
51 static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);
52 
53 /*
54  * The maximum amount of time in jiffies that a task may be runnable without
55  * being scheduled on a CPU. If this timeout is exceeded, it will trigger
56  * scx_error().
57  */
58 static unsigned long scx_watchdog_timeout;
59 
60 /*
61  * The last time the delayed work was run. This delayed work relies on
62  * ksoftirqd being able to run to service timer interrupts, so it's possible
63  * that this work itself could get wedged. To account for this, we check that
64  * it's not stalled in the timer tick, and trigger an error if it is.
65  */
66 static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
67 
68 static struct delayed_work scx_watchdog_work;
69 
70 /* for %SCX_KICK_WAIT */
71 static unsigned long __percpu *scx_kick_cpus_pnt_seqs;
72 
73 /*
74  * Direct dispatch marker.
75  *
76  * Non-NULL values are used for direct dispatch from enqueue path. A valid
77  * pointer points to the task currently being enqueued. An ERR_PTR value is used
78  * to indicate that direct dispatch has already happened.
79  */
80 static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
81 
82 static const struct rhashtable_params dsq_hash_params = {
83 	.key_len		= sizeof_field(struct scx_dispatch_q, id),
84 	.key_offset		= offsetof(struct scx_dispatch_q, id),
85 	.head_offset		= offsetof(struct scx_dispatch_q, hash_node),
86 };
87 
88 static LLIST_HEAD(dsqs_to_free);
89 
90 /* dispatch buf */
91 struct scx_dsp_buf_ent {
92 	struct task_struct	*task;
93 	unsigned long		qseq;
94 	u64			dsq_id;
95 	u64			enq_flags;
96 };
97 
98 static u32 scx_dsp_max_batch;
99 
100 struct scx_dsp_ctx {
101 	struct rq		*rq;
102 	u32			cursor;
103 	u32			nr_tasks;
104 	struct scx_dsp_buf_ent	buf[];
105 };
106 
107 static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
108 
109 /* string formatting from BPF */
110 struct scx_bstr_buf {
111 	u64			data[MAX_BPRINTF_VARARGS];
112 	char			line[SCX_EXIT_MSG_LEN];
113 };
114 
115 static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
116 static struct scx_bstr_buf scx_exit_bstr_buf;
117 
118 /* ops debug dump */
119 struct scx_dump_data {
120 	s32			cpu;
121 	bool			first;
122 	s32			cursor;
123 	struct seq_buf		*s;
124 	const char		*prefix;
125 	struct scx_bstr_buf	buf;
126 };
127 
128 static struct scx_dump_data scx_dump_data = {
129 	.cpu			= -1,
130 };
131 
132 /* /sys/kernel/sched_ext interface */
133 static struct kset *scx_kset;
134 
135 #define CREATE_TRACE_POINTS
136 #include <trace/events/sched_ext.h>
137 
138 static void process_ddsp_deferred_locals(struct rq *rq);
139 static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags);
140 static void scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind,
141 		      s64 exit_code, const char *fmt, va_list args);
142 
scx_exit(struct scx_sched * sch,enum scx_exit_kind kind,s64 exit_code,const char * fmt,...)143 static __printf(4, 5) void scx_exit(struct scx_sched *sch,
144 				    enum scx_exit_kind kind, s64 exit_code,
145 				    const char *fmt, ...)
146 {
147 	va_list args;
148 
149 	va_start(args, fmt);
150 	scx_vexit(sch, kind, exit_code, fmt, args);
151 	va_end(args);
152 }
153 
154 #define scx_error(sch, fmt, args...)	scx_exit((sch), SCX_EXIT_ERROR, 0, fmt, ##args)
155 
156 #define SCX_HAS_OP(sch, op)	test_bit(SCX_OP_IDX(op), (sch)->has_op)
157 
jiffies_delta_msecs(unsigned long at,unsigned long now)158 static long jiffies_delta_msecs(unsigned long at, unsigned long now)
159 {
160 	if (time_after(at, now))
161 		return jiffies_to_msecs(at - now);
162 	else
163 		return -(long)jiffies_to_msecs(now - at);
164 }
165 
166 /* if the highest set bit is N, return a mask with bits [N+1, 31] set */
higher_bits(u32 flags)167 static u32 higher_bits(u32 flags)
168 {
169 	return ~((1 << fls(flags)) - 1);
170 }
171 
172 /* return the mask with only the highest bit set */
highest_bit(u32 flags)173 static u32 highest_bit(u32 flags)
174 {
175 	int bit = fls(flags);
176 	return ((u64)1 << bit) >> 1;
177 }
178 
u32_before(u32 a,u32 b)179 static bool u32_before(u32 a, u32 b)
180 {
181 	return (s32)(a - b) < 0;
182 }
183 
find_global_dsq(struct scx_sched * sch,struct task_struct * p)184 static struct scx_dispatch_q *find_global_dsq(struct scx_sched *sch,
185 					      struct task_struct *p)
186 {
187 	return sch->global_dsqs[cpu_to_node(task_cpu(p))];
188 }
189 
find_user_dsq(struct scx_sched * sch,u64 dsq_id)190 static struct scx_dispatch_q *find_user_dsq(struct scx_sched *sch, u64 dsq_id)
191 {
192 	return rhashtable_lookup_fast(&sch->dsq_hash, &dsq_id, dsq_hash_params);
193 }
194 
195 /*
196  * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
197  * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
198  * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check
199  * whether it's running from an allowed context.
200  *
201  * @mask is constant, always inline to cull the mask calculations.
202  */
scx_kf_allow(u32 mask)203 static __always_inline void scx_kf_allow(u32 mask)
204 {
205 	/* nesting is allowed only in increasing scx_kf_mask order */
206 	WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
207 		  "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
208 		  current->scx.kf_mask, mask);
209 	current->scx.kf_mask |= mask;
210 	barrier();
211 }
212 
scx_kf_disallow(u32 mask)213 static void scx_kf_disallow(u32 mask)
214 {
215 	barrier();
216 	current->scx.kf_mask &= ~mask;
217 }
218 
219 /*
220  * Track the rq currently locked.
221  *
222  * This allows kfuncs to safely operate on rq from any scx ops callback,
223  * knowing which rq is already locked.
224  */
225 DEFINE_PER_CPU(struct rq *, scx_locked_rq_state);
226 
update_locked_rq(struct rq * rq)227 static inline void update_locked_rq(struct rq *rq)
228 {
229 	/*
230 	 * Check whether @rq is actually locked. This can help expose bugs
231 	 * or incorrect assumptions about the context in which a kfunc or
232 	 * callback is executed.
233 	 */
234 	if (rq)
235 		lockdep_assert_rq_held(rq);
236 	__this_cpu_write(scx_locked_rq_state, rq);
237 }
238 
239 #define SCX_CALL_OP(sch, mask, op, rq, args...)					\
240 do {										\
241 	if (rq)									\
242 		update_locked_rq(rq);						\
243 	if (mask) {								\
244 		scx_kf_allow(mask);						\
245 		(sch)->ops.op(args);						\
246 		scx_kf_disallow(mask);						\
247 	} else {								\
248 		(sch)->ops.op(args);						\
249 	}									\
250 	if (rq)									\
251 		update_locked_rq(NULL);						\
252 } while (0)
253 
254 #define SCX_CALL_OP_RET(sch, mask, op, rq, args...)				\
255 ({										\
256 	__typeof__((sch)->ops.op(args)) __ret;					\
257 										\
258 	if (rq)									\
259 		update_locked_rq(rq);						\
260 	if (mask) {								\
261 		scx_kf_allow(mask);						\
262 		__ret = (sch)->ops.op(args);					\
263 		scx_kf_disallow(mask);						\
264 	} else {								\
265 		__ret = (sch)->ops.op(args);					\
266 	}									\
267 	if (rq)									\
268 		update_locked_rq(NULL);						\
269 	__ret;									\
270 })
271 
272 /*
273  * Some kfuncs are allowed only on the tasks that are subjects of the
274  * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such
275  * restrictions, the following SCX_CALL_OP_*() variants should be used when
276  * invoking scx_ops operations that take task arguments. These can only be used
277  * for non-nesting operations due to the way the tasks are tracked.
278  *
279  * kfuncs which can only operate on such tasks can in turn use
280  * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
281  * the specific task.
282  */
283 #define SCX_CALL_OP_TASK(sch, mask, op, rq, task, args...)			\
284 do {										\
285 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
286 	current->scx.kf_tasks[0] = task;					\
287 	SCX_CALL_OP((sch), mask, op, rq, task, ##args);				\
288 	current->scx.kf_tasks[0] = NULL;					\
289 } while (0)
290 
291 #define SCX_CALL_OP_TASK_RET(sch, mask, op, rq, task, args...)			\
292 ({										\
293 	__typeof__((sch)->ops.op(task, ##args)) __ret;				\
294 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
295 	current->scx.kf_tasks[0] = task;					\
296 	__ret = SCX_CALL_OP_RET((sch), mask, op, rq, task, ##args);		\
297 	current->scx.kf_tasks[0] = NULL;					\
298 	__ret;									\
299 })
300 
301 #define SCX_CALL_OP_2TASKS_RET(sch, mask, op, rq, task0, task1, args...)	\
302 ({										\
303 	__typeof__((sch)->ops.op(task0, task1, ##args)) __ret;			\
304 	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
305 	current->scx.kf_tasks[0] = task0;					\
306 	current->scx.kf_tasks[1] = task1;					\
307 	__ret = SCX_CALL_OP_RET((sch), mask, op, rq, task0, task1, ##args);	\
308 	current->scx.kf_tasks[0] = NULL;					\
309 	current->scx.kf_tasks[1] = NULL;					\
310 	__ret;									\
311 })
312 
313 /* @mask is constant, always inline to cull unnecessary branches */
scx_kf_allowed(struct scx_sched * sch,u32 mask)314 static __always_inline bool scx_kf_allowed(struct scx_sched *sch, u32 mask)
315 {
316 	if (unlikely(!(current->scx.kf_mask & mask))) {
317 		scx_error(sch, "kfunc with mask 0x%x called from an operation only allowing 0x%x",
318 			  mask, current->scx.kf_mask);
319 		return false;
320 	}
321 
322 	/*
323 	 * Enforce nesting boundaries. e.g. A kfunc which can be called from
324 	 * DISPATCH must not be called if we're running DEQUEUE which is nested
325 	 * inside ops.dispatch(). We don't need to check boundaries for any
326 	 * blocking kfuncs as the verifier ensures they're only called from
327 	 * sleepable progs.
328 	 */
329 	if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
330 		     (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
331 		scx_error(sch, "cpu_release kfunc called from a nested operation");
332 		return false;
333 	}
334 
335 	if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
336 		     (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
337 		scx_error(sch, "dispatch kfunc called from a nested operation");
338 		return false;
339 	}
340 
341 	return true;
342 }
343 
344 /* see SCX_CALL_OP_TASK() */
scx_kf_allowed_on_arg_tasks(struct scx_sched * sch,u32 mask,struct task_struct * p)345 static __always_inline bool scx_kf_allowed_on_arg_tasks(struct scx_sched *sch,
346 							u32 mask,
347 							struct task_struct *p)
348 {
349 	if (!scx_kf_allowed(sch, mask))
350 		return false;
351 
352 	if (unlikely((p != current->scx.kf_tasks[0] &&
353 		      p != current->scx.kf_tasks[1]))) {
354 		scx_error(sch, "called on a task not being operated on");
355 		return false;
356 	}
357 
358 	return true;
359 }
360 
361 /**
362  * nldsq_next_task - Iterate to the next task in a non-local DSQ
363  * @dsq: user dsq being iterated
364  * @cur: current position, %NULL to start iteration
365  * @rev: walk backwards
366  *
367  * Returns %NULL when iteration is finished.
368  */
nldsq_next_task(struct scx_dispatch_q * dsq,struct task_struct * cur,bool rev)369 static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
370 					   struct task_struct *cur, bool rev)
371 {
372 	struct list_head *list_node;
373 	struct scx_dsq_list_node *dsq_lnode;
374 
375 	lockdep_assert_held(&dsq->lock);
376 
377 	if (cur)
378 		list_node = &cur->scx.dsq_list.node;
379 	else
380 		list_node = &dsq->list;
381 
382 	/* find the next task, need to skip BPF iteration cursors */
383 	do {
384 		if (rev)
385 			list_node = list_node->prev;
386 		else
387 			list_node = list_node->next;
388 
389 		if (list_node == &dsq->list)
390 			return NULL;
391 
392 		dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
393 					 node);
394 	} while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);
395 
396 	return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
397 }
398 
399 #define nldsq_for_each_task(p, dsq)						\
400 	for ((p) = nldsq_next_task((dsq), NULL, false); (p);			\
401 	     (p) = nldsq_next_task((dsq), (p), false))
402 
403 
404 /*
405  * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
406  * dispatch order. BPF-visible iterator is opaque and larger to allow future
407  * changes without breaking backward compatibility. Can be used with
408  * bpf_for_each(). See bpf_iter_scx_dsq_*().
409  */
410 enum scx_dsq_iter_flags {
411 	/* iterate in the reverse dispatch order */
412 	SCX_DSQ_ITER_REV		= 1U << 16,
413 
414 	__SCX_DSQ_ITER_HAS_SLICE	= 1U << 30,
415 	__SCX_DSQ_ITER_HAS_VTIME	= 1U << 31,
416 
417 	__SCX_DSQ_ITER_USER_FLAGS	= SCX_DSQ_ITER_REV,
418 	__SCX_DSQ_ITER_ALL_FLAGS	= __SCX_DSQ_ITER_USER_FLAGS |
419 					  __SCX_DSQ_ITER_HAS_SLICE |
420 					  __SCX_DSQ_ITER_HAS_VTIME,
421 };
422 
423 struct bpf_iter_scx_dsq_kern {
424 	struct scx_dsq_list_node	cursor;
425 	struct scx_dispatch_q		*dsq;
426 	u64				slice;
427 	u64				vtime;
428 } __attribute__((aligned(8)));
429 
430 struct bpf_iter_scx_dsq {
431 	u64				__opaque[6];
432 } __attribute__((aligned(8)));
433 
434 
435 /*
436  * SCX task iterator.
437  */
438 struct scx_task_iter {
439 	struct sched_ext_entity		cursor;
440 	struct task_struct		*locked_task;
441 	struct rq			*rq;
442 	struct rq_flags			rf;
443 	u32				cnt;
444 	bool				list_locked;
445 };
446 
447 /**
448  * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration
449  * @iter: iterator to init
450  *
451  * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter
452  * must eventually be stopped with scx_task_iter_stop().
453  *
454  * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock()
455  * between this and the first next() call or between any two next() calls. If
456  * the locks are released between two next() calls, the caller is responsible
457  * for ensuring that the task being iterated remains accessible either through
458  * RCU read lock or obtaining a reference count.
459  *
460  * All tasks which existed when the iteration started are guaranteed to be
461  * visited as long as they still exist.
462  */
scx_task_iter_start(struct scx_task_iter * iter)463 static void scx_task_iter_start(struct scx_task_iter *iter)
464 {
465 	BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
466 		     ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
467 
468 	spin_lock_irq(&scx_tasks_lock);
469 
470 	iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
471 	list_add(&iter->cursor.tasks_node, &scx_tasks);
472 	iter->locked_task = NULL;
473 	iter->cnt = 0;
474 	iter->list_locked = true;
475 }
476 
__scx_task_iter_rq_unlock(struct scx_task_iter * iter)477 static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
478 {
479 	if (iter->locked_task) {
480 		task_rq_unlock(iter->rq, iter->locked_task, &iter->rf);
481 		iter->locked_task = NULL;
482 	}
483 }
484 
485 /**
486  * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator
487  * @iter: iterator to unlock
488  *
489  * If @iter is in the middle of a locked iteration, it may be locking the rq of
490  * the task currently being visited in addition to scx_tasks_lock. Unlock both.
491  * This function can be safely called anytime during an iteration. The next
492  * iterator operation will automatically restore the necessary locking.
493  */
scx_task_iter_unlock(struct scx_task_iter * iter)494 static void scx_task_iter_unlock(struct scx_task_iter *iter)
495 {
496 	__scx_task_iter_rq_unlock(iter);
497 	if (iter->list_locked) {
498 		iter->list_locked = false;
499 		spin_unlock_irq(&scx_tasks_lock);
500 	}
501 }
502 
__scx_task_iter_maybe_relock(struct scx_task_iter * iter)503 static void __scx_task_iter_maybe_relock(struct scx_task_iter *iter)
504 {
505 	if (!iter->list_locked) {
506 		spin_lock_irq(&scx_tasks_lock);
507 		iter->list_locked = true;
508 	}
509 }
510 
511 /**
512  * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
513  * @iter: iterator to exit
514  *
515  * Exit a previously initialized @iter. Must be called with scx_tasks_lock held
516  * which is released on return. If the iterator holds a task's rq lock, that rq
517  * lock is also released. See scx_task_iter_start() for details.
518  */
scx_task_iter_stop(struct scx_task_iter * iter)519 static void scx_task_iter_stop(struct scx_task_iter *iter)
520 {
521 	__scx_task_iter_maybe_relock(iter);
522 	list_del_init(&iter->cursor.tasks_node);
523 	scx_task_iter_unlock(iter);
524 }
525 
526 /**
527  * scx_task_iter_next - Next task
528  * @iter: iterator to walk
529  *
530  * Visit the next task. See scx_task_iter_start() for details. Locks are dropped
531  * and re-acquired every %SCX_TASK_ITER_BATCH iterations to avoid causing stalls
532  * by holding scx_tasks_lock for too long.
533  */
scx_task_iter_next(struct scx_task_iter * iter)534 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
535 {
536 	struct list_head *cursor = &iter->cursor.tasks_node;
537 	struct sched_ext_entity *pos;
538 
539 	__scx_task_iter_maybe_relock(iter);
540 
541 	if (!(++iter->cnt % SCX_TASK_ITER_BATCH)) {
542 		scx_task_iter_unlock(iter);
543 		cond_resched();
544 		__scx_task_iter_maybe_relock(iter);
545 	}
546 
547 	list_for_each_entry(pos, cursor, tasks_node) {
548 		if (&pos->tasks_node == &scx_tasks)
549 			return NULL;
550 		if (!(pos->flags & SCX_TASK_CURSOR)) {
551 			list_move(cursor, &pos->tasks_node);
552 			return container_of(pos, struct task_struct, scx);
553 		}
554 	}
555 
556 	/* can't happen, should always terminate at scx_tasks above */
557 	BUG();
558 }
559 
560 /**
561  * scx_task_iter_next_locked - Next non-idle task with its rq locked
562  * @iter: iterator to walk
563  *
564  * Visit the non-idle task with its rq lock held. Allows callers to specify
565  * whether they would like to filter out dead tasks. See scx_task_iter_start()
566  * for details.
567  */
scx_task_iter_next_locked(struct scx_task_iter * iter)568 static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
569 {
570 	struct task_struct *p;
571 
572 	__scx_task_iter_rq_unlock(iter);
573 
574 	while ((p = scx_task_iter_next(iter))) {
575 		/*
576 		 * scx_task_iter is used to prepare and move tasks into SCX
577 		 * while loading the BPF scheduler and vice-versa while
578 		 * unloading. The init_tasks ("swappers") should be excluded
579 		 * from the iteration because:
580 		 *
581 		 * - It's unsafe to use __setschduler_prio() on an init_task to
582 		 *   determine the sched_class to use as it won't preserve its
583 		 *   idle_sched_class.
584 		 *
585 		 * - ops.init/exit_task() can easily be confused if called with
586 		 *   init_tasks as they, e.g., share PID 0.
587 		 *
588 		 * As init_tasks are never scheduled through SCX, they can be
589 		 * skipped safely. Note that is_idle_task() which tests %PF_IDLE
590 		 * doesn't work here:
591 		 *
592 		 * - %PF_IDLE may not be set for an init_task whose CPU hasn't
593 		 *   yet been onlined.
594 		 *
595 		 * - %PF_IDLE can be set on tasks that are not init_tasks. See
596 		 *   play_idle_precise() used by CONFIG_IDLE_INJECT.
597 		 *
598 		 * Test for idle_sched_class as only init_tasks are on it.
599 		 */
600 		if (p->sched_class != &idle_sched_class)
601 			break;
602 	}
603 	if (!p)
604 		return NULL;
605 
606 	iter->rq = task_rq_lock(p, &iter->rf);
607 	iter->locked_task = p;
608 
609 	return p;
610 }
611 
612 /**
613  * scx_add_event - Increase an event counter for 'name' by 'cnt'
614  * @sch: scx_sched to account events for
615  * @name: an event name defined in struct scx_event_stats
616  * @cnt: the number of the event occurred
617  *
618  * This can be used when preemption is not disabled.
619  */
620 #define scx_add_event(sch, name, cnt) do {					\
621 	this_cpu_add((sch)->pcpu->event_stats.name, (cnt));			\
622 	trace_sched_ext_event(#name, (cnt));					\
623 } while(0)
624 
625 /**
626  * __scx_add_event - Increase an event counter for 'name' by 'cnt'
627  * @sch: scx_sched to account events for
628  * @name: an event name defined in struct scx_event_stats
629  * @cnt: the number of the event occurred
630  *
631  * This should be used only when preemption is disabled.
632  */
633 #define __scx_add_event(sch, name, cnt) do {					\
634 	__this_cpu_add((sch)->pcpu->event_stats.name, (cnt));			\
635 	trace_sched_ext_event(#name, cnt);					\
636 } while(0)
637 
638 /**
639  * scx_agg_event - Aggregate an event counter 'kind' from 'src_e' to 'dst_e'
640  * @dst_e: destination event stats
641  * @src_e: source event stats
642  * @kind: a kind of event to be aggregated
643  */
644 #define scx_agg_event(dst_e, src_e, kind) do {					\
645 	(dst_e)->kind += READ_ONCE((src_e)->kind);				\
646 } while(0)
647 
648 /**
649  * scx_dump_event - Dump an event 'kind' in 'events' to 's'
650  * @s: output seq_buf
651  * @events: event stats
652  * @kind: a kind of event to dump
653  */
654 #define scx_dump_event(s, events, kind) do {					\
655 	dump_line(&(s), "%40s: %16lld", #kind, (events)->kind);			\
656 } while (0)
657 
658 
659 static void scx_read_events(struct scx_sched *sch,
660 			    struct scx_event_stats *events);
661 
scx_enable_state(void)662 static enum scx_enable_state scx_enable_state(void)
663 {
664 	return atomic_read(&scx_enable_state_var);
665 }
666 
scx_set_enable_state(enum scx_enable_state to)667 static enum scx_enable_state scx_set_enable_state(enum scx_enable_state to)
668 {
669 	return atomic_xchg(&scx_enable_state_var, to);
670 }
671 
scx_tryset_enable_state(enum scx_enable_state to,enum scx_enable_state from)672 static bool scx_tryset_enable_state(enum scx_enable_state to,
673 				    enum scx_enable_state from)
674 {
675 	int from_v = from;
676 
677 	return atomic_try_cmpxchg(&scx_enable_state_var, &from_v, to);
678 }
679 
680 /**
681  * wait_ops_state - Busy-wait the specified ops state to end
682  * @p: target task
683  * @opss: state to wait the end of
684  *
685  * Busy-wait for @p to transition out of @opss. This can only be used when the
686  * state part of @opss is %SCX_QUEUEING or %SCX_DISPATCHING. This function also
687  * has load_acquire semantics to ensure that the caller can see the updates made
688  * in the enqueueing and dispatching paths.
689  */
wait_ops_state(struct task_struct * p,unsigned long opss)690 static void wait_ops_state(struct task_struct *p, unsigned long opss)
691 {
692 	do {
693 		cpu_relax();
694 	} while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
695 }
696 
__cpu_valid(s32 cpu)697 static inline bool __cpu_valid(s32 cpu)
698 {
699 	return likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu));
700 }
701 
702 /**
703  * ops_cpu_valid - Verify a cpu number, to be used on ops input args
704  * @sch: scx_sched to abort on error
705  * @cpu: cpu number which came from a BPF ops
706  * @where: extra information reported on error
707  *
708  * @cpu is a cpu number which came from the BPF scheduler and can be any value.
709  * Verify that it is in range and one of the possible cpus. If invalid, trigger
710  * an ops error.
711  */
ops_cpu_valid(struct scx_sched * sch,s32 cpu,const char * where)712 static bool ops_cpu_valid(struct scx_sched *sch, s32 cpu, const char *where)
713 {
714 	if (__cpu_valid(cpu)) {
715 		return true;
716 	} else {
717 		scx_error(sch, "invalid CPU %d%s%s", cpu, where ? " " : "", where ?: "");
718 		return false;
719 	}
720 }
721 
722 /**
723  * ops_sanitize_err - Sanitize a -errno value
724  * @sch: scx_sched to error out on error
725  * @ops_name: operation to blame on failure
726  * @err: -errno value to sanitize
727  *
728  * Verify @err is a valid -errno. If not, trigger scx_error() and return
729  * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
730  * cause misbehaviors. For an example, a large negative return from
731  * ops.init_task() triggers an oops when passed up the call chain because the
732  * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
733  * handled as a pointer.
734  */
ops_sanitize_err(struct scx_sched * sch,const char * ops_name,s32 err)735 static int ops_sanitize_err(struct scx_sched *sch, const char *ops_name, s32 err)
736 {
737 	if (err < 0 && err >= -MAX_ERRNO)
738 		return err;
739 
740 	scx_error(sch, "ops.%s() returned an invalid errno %d", ops_name, err);
741 	return -EPROTO;
742 }
743 
run_deferred(struct rq * rq)744 static void run_deferred(struct rq *rq)
745 {
746 	process_ddsp_deferred_locals(rq);
747 }
748 
deferred_bal_cb_workfn(struct rq * rq)749 static void deferred_bal_cb_workfn(struct rq *rq)
750 {
751 	run_deferred(rq);
752 }
753 
deferred_irq_workfn(struct irq_work * irq_work)754 static void deferred_irq_workfn(struct irq_work *irq_work)
755 {
756 	struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
757 
758 	raw_spin_rq_lock(rq);
759 	run_deferred(rq);
760 	raw_spin_rq_unlock(rq);
761 }
762 
763 /**
764  * schedule_deferred - Schedule execution of deferred actions on an rq
765  * @rq: target rq
766  *
767  * Schedule execution of deferred actions on @rq. Must be called with @rq
768  * locked. Deferred actions are executed with @rq locked but unpinned, and thus
769  * can unlock @rq to e.g. migrate tasks to other rqs.
770  */
schedule_deferred(struct rq * rq)771 static void schedule_deferred(struct rq *rq)
772 {
773 	lockdep_assert_rq_held(rq);
774 
775 	/*
776 	 * If in the middle of waking up a task, task_woken_scx() will be called
777 	 * afterwards which will then run the deferred actions, no need to
778 	 * schedule anything.
779 	 */
780 	if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
781 		return;
782 
783 	/*
784 	 * If in balance, the balance callbacks will be called before rq lock is
785 	 * released. Schedule one.
786 	 */
787 	if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
788 		queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
789 				       deferred_bal_cb_workfn);
790 		return;
791 	}
792 
793 	/*
794 	 * No scheduler hooks available. Queue an irq work. They are executed on
795 	 * IRQ re-enable which may take a bit longer than the scheduler hooks.
796 	 * The above WAKEUP and BALANCE paths should cover most of the cases and
797 	 * the time to IRQ re-enable shouldn't be long.
798 	 */
799 	irq_work_queue(&rq->scx.deferred_irq_work);
800 }
801 
802 /**
803  * touch_core_sched - Update timestamp used for core-sched task ordering
804  * @rq: rq to read clock from, must be locked
805  * @p: task to update the timestamp for
806  *
807  * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
808  * implement global or local-DSQ FIFO ordering for core-sched. Should be called
809  * when a task becomes runnable and its turn on the CPU ends (e.g. slice
810  * exhaustion).
811  */
touch_core_sched(struct rq * rq,struct task_struct * p)812 static void touch_core_sched(struct rq *rq, struct task_struct *p)
813 {
814 	lockdep_assert_rq_held(rq);
815 
816 #ifdef CONFIG_SCHED_CORE
817 	/*
818 	 * It's okay to update the timestamp spuriously. Use
819 	 * sched_core_disabled() which is cheaper than enabled().
820 	 *
821 	 * As this is used to determine ordering between tasks of sibling CPUs,
822 	 * it may be better to use per-core dispatch sequence instead.
823 	 */
824 	if (!sched_core_disabled())
825 		p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
826 #endif
827 }
828 
829 /**
830  * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
831  * @rq: rq to read clock from, must be locked
832  * @p: task being dispatched
833  *
834  * If the BPF scheduler implements custom core-sched ordering via
835  * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
836  * ordering within each local DSQ. This function is called from dispatch paths
837  * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
838  */
touch_core_sched_dispatch(struct rq * rq,struct task_struct * p)839 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
840 {
841 	lockdep_assert_rq_held(rq);
842 
843 #ifdef CONFIG_SCHED_CORE
844 	if (unlikely(SCX_HAS_OP(scx_root, core_sched_before)))
845 		touch_core_sched(rq, p);
846 #endif
847 }
848 
update_curr_scx(struct rq * rq)849 static void update_curr_scx(struct rq *rq)
850 {
851 	struct task_struct *curr = rq->curr;
852 	s64 delta_exec;
853 
854 	delta_exec = update_curr_common(rq);
855 	if (unlikely(delta_exec <= 0))
856 		return;
857 
858 	if (curr->scx.slice != SCX_SLICE_INF) {
859 		curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
860 		if (!curr->scx.slice)
861 			touch_core_sched(rq, curr);
862 	}
863 }
864 
scx_dsq_priq_less(struct rb_node * node_a,const struct rb_node * node_b)865 static bool scx_dsq_priq_less(struct rb_node *node_a,
866 			      const struct rb_node *node_b)
867 {
868 	const struct task_struct *a =
869 		container_of(node_a, struct task_struct, scx.dsq_priq);
870 	const struct task_struct *b =
871 		container_of(node_b, struct task_struct, scx.dsq_priq);
872 
873 	return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
874 }
875 
dsq_mod_nr(struct scx_dispatch_q * dsq,s32 delta)876 static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
877 {
878 	/* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
879 	WRITE_ONCE(dsq->nr, dsq->nr + delta);
880 }
881 
refill_task_slice_dfl(struct scx_sched * sch,struct task_struct * p)882 static void refill_task_slice_dfl(struct scx_sched *sch, struct task_struct *p)
883 {
884 	p->scx.slice = SCX_SLICE_DFL;
885 	__scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1);
886 }
887 
dispatch_enqueue(struct scx_sched * sch,struct scx_dispatch_q * dsq,struct task_struct * p,u64 enq_flags)888 static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
889 			     struct task_struct *p, u64 enq_flags)
890 {
891 	bool is_local = dsq->id == SCX_DSQ_LOCAL;
892 
893 	WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
894 	WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
895 		     !RB_EMPTY_NODE(&p->scx.dsq_priq));
896 
897 	if (!is_local) {
898 		raw_spin_lock(&dsq->lock);
899 		if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
900 			scx_error(sch, "attempting to dispatch to a destroyed dsq");
901 			/* fall back to the global dsq */
902 			raw_spin_unlock(&dsq->lock);
903 			dsq = find_global_dsq(sch, p);
904 			raw_spin_lock(&dsq->lock);
905 		}
906 	}
907 
908 	if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
909 		     (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
910 		/*
911 		 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
912 		 * their FIFO queues. To avoid confusion and accidentally
913 		 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we
914 		 * disallow any internal DSQ from doing vtime ordering of
915 		 * tasks.
916 		 */
917 		scx_error(sch, "cannot use vtime ordering for built-in DSQs");
918 		enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
919 	}
920 
921 	if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
922 		struct rb_node *rbp;
923 
924 		/*
925 		 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
926 		 * linked to both the rbtree and list on PRIQs, this can only be
927 		 * tested easily when adding the first task.
928 		 */
929 		if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
930 			     nldsq_next_task(dsq, NULL, false)))
931 			scx_error(sch, "DSQ ID 0x%016llx already had FIFO-enqueued tasks",
932 				  dsq->id);
933 
934 		p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
935 		rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
936 
937 		/*
938 		 * Find the previous task and insert after it on the list so
939 		 * that @dsq->list is vtime ordered.
940 		 */
941 		rbp = rb_prev(&p->scx.dsq_priq);
942 		if (rbp) {
943 			struct task_struct *prev =
944 				container_of(rbp, struct task_struct,
945 					     scx.dsq_priq);
946 			list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
947 		} else {
948 			list_add(&p->scx.dsq_list.node, &dsq->list);
949 		}
950 	} else {
951 		/* a FIFO DSQ shouldn't be using PRIQ enqueuing */
952 		if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
953 			scx_error(sch, "DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
954 				  dsq->id);
955 
956 		if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
957 			list_add(&p->scx.dsq_list.node, &dsq->list);
958 		else
959 			list_add_tail(&p->scx.dsq_list.node, &dsq->list);
960 	}
961 
962 	/* seq records the order tasks are queued, used by BPF DSQ iterator */
963 	dsq->seq++;
964 	p->scx.dsq_seq = dsq->seq;
965 
966 	dsq_mod_nr(dsq, 1);
967 	p->scx.dsq = dsq;
968 
969 	/*
970 	 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
971 	 * direct dispatch path, but we clear them here because the direct
972 	 * dispatch verdict may be overridden on the enqueue path during e.g.
973 	 * bypass.
974 	 */
975 	p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
976 	p->scx.ddsp_enq_flags = 0;
977 
978 	/*
979 	 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
980 	 * match waiters' load_acquire.
981 	 */
982 	if (enq_flags & SCX_ENQ_CLEAR_OPSS)
983 		atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
984 
985 	if (is_local) {
986 		struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
987 		bool preempt = false;
988 
989 		if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
990 		    rq->curr->sched_class == &ext_sched_class) {
991 			rq->curr->scx.slice = 0;
992 			preempt = true;
993 		}
994 
995 		if (preempt || sched_class_above(&ext_sched_class,
996 						 rq->curr->sched_class))
997 			resched_curr(rq);
998 	} else {
999 		raw_spin_unlock(&dsq->lock);
1000 	}
1001 }
1002 
task_unlink_from_dsq(struct task_struct * p,struct scx_dispatch_q * dsq)1003 static void task_unlink_from_dsq(struct task_struct *p,
1004 				 struct scx_dispatch_q *dsq)
1005 {
1006 	WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
1007 
1008 	if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
1009 		rb_erase(&p->scx.dsq_priq, &dsq->priq);
1010 		RB_CLEAR_NODE(&p->scx.dsq_priq);
1011 		p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
1012 	}
1013 
1014 	list_del_init(&p->scx.dsq_list.node);
1015 	dsq_mod_nr(dsq, -1);
1016 }
1017 
dispatch_dequeue(struct rq * rq,struct task_struct * p)1018 static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
1019 {
1020 	struct scx_dispatch_q *dsq = p->scx.dsq;
1021 	bool is_local = dsq == &rq->scx.local_dsq;
1022 
1023 	if (!dsq) {
1024 		/*
1025 		 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
1026 		 * Unlinking is all that's needed to cancel.
1027 		 */
1028 		if (unlikely(!list_empty(&p->scx.dsq_list.node)))
1029 			list_del_init(&p->scx.dsq_list.node);
1030 
1031 		/*
1032 		 * When dispatching directly from the BPF scheduler to a local
1033 		 * DSQ, the task isn't associated with any DSQ but
1034 		 * @p->scx.holding_cpu may be set under the protection of
1035 		 * %SCX_OPSS_DISPATCHING.
1036 		 */
1037 		if (p->scx.holding_cpu >= 0)
1038 			p->scx.holding_cpu = -1;
1039 
1040 		return;
1041 	}
1042 
1043 	if (!is_local)
1044 		raw_spin_lock(&dsq->lock);
1045 
1046 	/*
1047 	 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
1048 	 * change underneath us.
1049 	*/
1050 	if (p->scx.holding_cpu < 0) {
1051 		/* @p must still be on @dsq, dequeue */
1052 		task_unlink_from_dsq(p, dsq);
1053 	} else {
1054 		/*
1055 		 * We're racing against dispatch_to_local_dsq() which already
1056 		 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
1057 		 * holding_cpu which tells dispatch_to_local_dsq() that it lost
1058 		 * the race.
1059 		 */
1060 		WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
1061 		p->scx.holding_cpu = -1;
1062 	}
1063 	p->scx.dsq = NULL;
1064 
1065 	if (!is_local)
1066 		raw_spin_unlock(&dsq->lock);
1067 }
1068 
find_dsq_for_dispatch(struct scx_sched * sch,struct rq * rq,u64 dsq_id,struct task_struct * p)1069 static struct scx_dispatch_q *find_dsq_for_dispatch(struct scx_sched *sch,
1070 						    struct rq *rq, u64 dsq_id,
1071 						    struct task_struct *p)
1072 {
1073 	struct scx_dispatch_q *dsq;
1074 
1075 	if (dsq_id == SCX_DSQ_LOCAL)
1076 		return &rq->scx.local_dsq;
1077 
1078 	if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
1079 		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
1080 
1081 		if (!ops_cpu_valid(sch, cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
1082 			return find_global_dsq(sch, p);
1083 
1084 		return &cpu_rq(cpu)->scx.local_dsq;
1085 	}
1086 
1087 	if (dsq_id == SCX_DSQ_GLOBAL)
1088 		dsq = find_global_dsq(sch, p);
1089 	else
1090 		dsq = find_user_dsq(sch, dsq_id);
1091 
1092 	if (unlikely(!dsq)) {
1093 		scx_error(sch, "non-existent DSQ 0x%llx for %s[%d]",
1094 			  dsq_id, p->comm, p->pid);
1095 		return find_global_dsq(sch, p);
1096 	}
1097 
1098 	return dsq;
1099 }
1100 
mark_direct_dispatch(struct scx_sched * sch,struct task_struct * ddsp_task,struct task_struct * p,u64 dsq_id,u64 enq_flags)1101 static void mark_direct_dispatch(struct scx_sched *sch,
1102 				 struct task_struct *ddsp_task,
1103 				 struct task_struct *p, u64 dsq_id,
1104 				 u64 enq_flags)
1105 {
1106 	/*
1107 	 * Mark that dispatch already happened from ops.select_cpu() or
1108 	 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
1109 	 * which can never match a valid task pointer.
1110 	 */
1111 	__this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));
1112 
1113 	/* @p must match the task on the enqueue path */
1114 	if (unlikely(p != ddsp_task)) {
1115 		if (IS_ERR(ddsp_task))
1116 			scx_error(sch, "%s[%d] already direct-dispatched",
1117 				  p->comm, p->pid);
1118 		else
1119 			scx_error(sch, "scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
1120 				  ddsp_task->comm, ddsp_task->pid,
1121 				  p->comm, p->pid);
1122 		return;
1123 	}
1124 
1125 	WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
1126 	WARN_ON_ONCE(p->scx.ddsp_enq_flags);
1127 
1128 	p->scx.ddsp_dsq_id = dsq_id;
1129 	p->scx.ddsp_enq_flags = enq_flags;
1130 }
1131 
direct_dispatch(struct scx_sched * sch,struct task_struct * p,u64 enq_flags)1132 static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
1133 			    u64 enq_flags)
1134 {
1135 	struct rq *rq = task_rq(p);
1136 	struct scx_dispatch_q *dsq =
1137 		find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
1138 
1139 	touch_core_sched_dispatch(rq, p);
1140 
1141 	p->scx.ddsp_enq_flags |= enq_flags;
1142 
1143 	/*
1144 	 * We are in the enqueue path with @rq locked and pinned, and thus can't
1145 	 * double lock a remote rq and enqueue to its local DSQ. For
1146 	 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
1147 	 * the enqueue so that it's executed when @rq can be unlocked.
1148 	 */
1149 	if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
1150 		unsigned long opss;
1151 
1152 		opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
1153 
1154 		switch (opss & SCX_OPSS_STATE_MASK) {
1155 		case SCX_OPSS_NONE:
1156 			break;
1157 		case SCX_OPSS_QUEUEING:
1158 			/*
1159 			 * As @p was never passed to the BPF side, _release is
1160 			 * not strictly necessary. Still do it for consistency.
1161 			 */
1162 			atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1163 			break;
1164 		default:
1165 			WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()",
1166 				  p->comm, p->pid, opss);
1167 			atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1168 			break;
1169 		}
1170 
1171 		WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1172 		list_add_tail(&p->scx.dsq_list.node,
1173 			      &rq->scx.ddsp_deferred_locals);
1174 		schedule_deferred(rq);
1175 		return;
1176 	}
1177 
1178 	dispatch_enqueue(sch, dsq, p,
1179 			 p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
1180 }
1181 
scx_rq_online(struct rq * rq)1182 static bool scx_rq_online(struct rq *rq)
1183 {
1184 	/*
1185 	 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates
1186 	 * the online state as seen from the BPF scheduler. cpu_active() test
1187 	 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will
1188 	 * stay set until the current scheduling operation is complete even if
1189 	 * we aren't locking @rq.
1190 	 */
1191 	return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
1192 }
1193 
do_enqueue_task(struct rq * rq,struct task_struct * p,u64 enq_flags,int sticky_cpu)1194 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
1195 			    int sticky_cpu)
1196 {
1197 	struct scx_sched *sch = scx_root;
1198 	struct task_struct **ddsp_taskp;
1199 	unsigned long qseq;
1200 
1201 	WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
1202 
1203 	/* rq migration */
1204 	if (sticky_cpu == cpu_of(rq))
1205 		goto local_norefill;
1206 
1207 	/*
1208 	 * If !scx_rq_online(), we already told the BPF scheduler that the CPU
1209 	 * is offline and are just running the hotplug path. Don't bother the
1210 	 * BPF scheduler.
1211 	 */
1212 	if (!scx_rq_online(rq))
1213 		goto local;
1214 
1215 	if (scx_rq_bypassing(rq)) {
1216 		__scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);
1217 		goto global;
1218 	}
1219 
1220 	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
1221 		goto direct;
1222 
1223 	/* see %SCX_OPS_ENQ_EXITING */
1224 	if (!(sch->ops.flags & SCX_OPS_ENQ_EXITING) &&
1225 	    unlikely(p->flags & PF_EXITING)) {
1226 		__scx_add_event(sch, SCX_EV_ENQ_SKIP_EXITING, 1);
1227 		goto local;
1228 	}
1229 
1230 	/* see %SCX_OPS_ENQ_MIGRATION_DISABLED */
1231 	if (!(sch->ops.flags & SCX_OPS_ENQ_MIGRATION_DISABLED) &&
1232 	    is_migration_disabled(p)) {
1233 		__scx_add_event(sch, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED, 1);
1234 		goto local;
1235 	}
1236 
1237 	if (unlikely(!SCX_HAS_OP(sch, enqueue)))
1238 		goto global;
1239 
1240 	/* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
1241 	qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
1242 
1243 	WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
1244 	atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);
1245 
1246 	ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
1247 	WARN_ON_ONCE(*ddsp_taskp);
1248 	*ddsp_taskp = p;
1249 
1250 	SCX_CALL_OP_TASK(sch, SCX_KF_ENQUEUE, enqueue, rq, p, enq_flags);
1251 
1252 	*ddsp_taskp = NULL;
1253 	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
1254 		goto direct;
1255 
1256 	/*
1257 	 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
1258 	 * dequeue may be waiting. The store_release matches their load_acquire.
1259 	 */
1260 	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
1261 	return;
1262 
1263 direct:
1264 	direct_dispatch(sch, p, enq_flags);
1265 	return;
1266 
1267 local:
1268 	/*
1269 	 * For task-ordering, slice refill must be treated as implying the end
1270 	 * of the current slice. Otherwise, the longer @p stays on the CPU, the
1271 	 * higher priority it becomes from scx_prio_less()'s POV.
1272 	 */
1273 	touch_core_sched(rq, p);
1274 	refill_task_slice_dfl(sch, p);
1275 local_norefill:
1276 	dispatch_enqueue(sch, &rq->scx.local_dsq, p, enq_flags);
1277 	return;
1278 
1279 global:
1280 	touch_core_sched(rq, p);	/* see the comment in local: */
1281 	refill_task_slice_dfl(sch, p);
1282 	dispatch_enqueue(sch, find_global_dsq(sch, p), p, enq_flags);
1283 }
1284 
task_runnable(const struct task_struct * p)1285 static bool task_runnable(const struct task_struct *p)
1286 {
1287 	return !list_empty(&p->scx.runnable_node);
1288 }
1289 
set_task_runnable(struct rq * rq,struct task_struct * p)1290 static void set_task_runnable(struct rq *rq, struct task_struct *p)
1291 {
1292 	lockdep_assert_rq_held(rq);
1293 
1294 	if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
1295 		p->scx.runnable_at = jiffies;
1296 		p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
1297 	}
1298 
1299 	/*
1300 	 * list_add_tail() must be used. scx_bypass() depends on tasks being
1301 	 * appended to the runnable_list.
1302 	 */
1303 	list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
1304 }
1305 
clr_task_runnable(struct task_struct * p,bool reset_runnable_at)1306 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
1307 {
1308 	list_del_init(&p->scx.runnable_node);
1309 	if (reset_runnable_at)
1310 		p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
1311 }
1312 
enqueue_task_scx(struct rq * rq,struct task_struct * p,int enq_flags)1313 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
1314 {
1315 	struct scx_sched *sch = scx_root;
1316 	int sticky_cpu = p->scx.sticky_cpu;
1317 
1318 	if (enq_flags & ENQUEUE_WAKEUP)
1319 		rq->scx.flags |= SCX_RQ_IN_WAKEUP;
1320 
1321 	enq_flags |= rq->scx.extra_enq_flags;
1322 
1323 	if (sticky_cpu >= 0)
1324 		p->scx.sticky_cpu = -1;
1325 
1326 	/*
1327 	 * Restoring a running task will be immediately followed by
1328 	 * set_next_task_scx() which expects the task to not be on the BPF
1329 	 * scheduler as tasks can only start running through local DSQs. Force
1330 	 * direct-dispatch into the local DSQ by setting the sticky_cpu.
1331 	 */
1332 	if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
1333 		sticky_cpu = cpu_of(rq);
1334 
1335 	if (p->scx.flags & SCX_TASK_QUEUED) {
1336 		WARN_ON_ONCE(!task_runnable(p));
1337 		goto out;
1338 	}
1339 
1340 	set_task_runnable(rq, p);
1341 	p->scx.flags |= SCX_TASK_QUEUED;
1342 	rq->scx.nr_running++;
1343 	add_nr_running(rq, 1);
1344 
1345 	if (SCX_HAS_OP(sch, runnable) && !task_on_rq_migrating(p))
1346 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, runnable, rq, p, enq_flags);
1347 
1348 	if (enq_flags & SCX_ENQ_WAKEUP)
1349 		touch_core_sched(rq, p);
1350 
1351 	do_enqueue_task(rq, p, enq_flags, sticky_cpu);
1352 out:
1353 	rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
1354 
1355 	if ((enq_flags & SCX_ENQ_CPU_SELECTED) &&
1356 	    unlikely(cpu_of(rq) != p->scx.selected_cpu))
1357 		__scx_add_event(sch, SCX_EV_SELECT_CPU_FALLBACK, 1);
1358 }
1359 
ops_dequeue(struct rq * rq,struct task_struct * p,u64 deq_flags)1360 static void ops_dequeue(struct rq *rq, struct task_struct *p, u64 deq_flags)
1361 {
1362 	struct scx_sched *sch = scx_root;
1363 	unsigned long opss;
1364 
1365 	/* dequeue is always temporary, don't reset runnable_at */
1366 	clr_task_runnable(p, false);
1367 
1368 	/* acquire ensures that we see the preceding updates on QUEUED */
1369 	opss = atomic_long_read_acquire(&p->scx.ops_state);
1370 
1371 	switch (opss & SCX_OPSS_STATE_MASK) {
1372 	case SCX_OPSS_NONE:
1373 		break;
1374 	case SCX_OPSS_QUEUEING:
1375 		/*
1376 		 * QUEUEING is started and finished while holding @p's rq lock.
1377 		 * As we're holding the rq lock now, we shouldn't see QUEUEING.
1378 		 */
1379 		BUG();
1380 	case SCX_OPSS_QUEUED:
1381 		if (SCX_HAS_OP(sch, dequeue))
1382 			SCX_CALL_OP_TASK(sch, SCX_KF_REST, dequeue, rq,
1383 					 p, deq_flags);
1384 
1385 		if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
1386 					    SCX_OPSS_NONE))
1387 			break;
1388 		fallthrough;
1389 	case SCX_OPSS_DISPATCHING:
1390 		/*
1391 		 * If @p is being dispatched from the BPF scheduler to a DSQ,
1392 		 * wait for the transfer to complete so that @p doesn't get
1393 		 * added to its DSQ after dequeueing is complete.
1394 		 *
1395 		 * As we're waiting on DISPATCHING with the rq locked, the
1396 		 * dispatching side shouldn't try to lock the rq while
1397 		 * DISPATCHING is set. See dispatch_to_local_dsq().
1398 		 *
1399 		 * DISPATCHING shouldn't have qseq set and control can reach
1400 		 * here with NONE @opss from the above QUEUED case block.
1401 		 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
1402 		 */
1403 		wait_ops_state(p, SCX_OPSS_DISPATCHING);
1404 		BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
1405 		break;
1406 	}
1407 }
1408 
dequeue_task_scx(struct rq * rq,struct task_struct * p,int deq_flags)1409 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags)
1410 {
1411 	struct scx_sched *sch = scx_root;
1412 
1413 	if (!(p->scx.flags & SCX_TASK_QUEUED)) {
1414 		WARN_ON_ONCE(task_runnable(p));
1415 		return true;
1416 	}
1417 
1418 	ops_dequeue(rq, p, deq_flags);
1419 
1420 	/*
1421 	 * A currently running task which is going off @rq first gets dequeued
1422 	 * and then stops running. As we want running <-> stopping transitions
1423 	 * to be contained within runnable <-> quiescent transitions, trigger
1424 	 * ->stopping() early here instead of in put_prev_task_scx().
1425 	 *
1426 	 * @p may go through multiple stopping <-> running transitions between
1427 	 * here and put_prev_task_scx() if task attribute changes occur while
1428 	 * balance_scx() leaves @rq unlocked. However, they don't contain any
1429 	 * information meaningful to the BPF scheduler and can be suppressed by
1430 	 * skipping the callbacks if the task is !QUEUED.
1431 	 */
1432 	if (SCX_HAS_OP(sch, stopping) && task_current(rq, p)) {
1433 		update_curr_scx(rq);
1434 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, false);
1435 	}
1436 
1437 	if (SCX_HAS_OP(sch, quiescent) && !task_on_rq_migrating(p))
1438 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, quiescent, rq, p, deq_flags);
1439 
1440 	if (deq_flags & SCX_DEQ_SLEEP)
1441 		p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
1442 	else
1443 		p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
1444 
1445 	p->scx.flags &= ~SCX_TASK_QUEUED;
1446 	rq->scx.nr_running--;
1447 	sub_nr_running(rq, 1);
1448 
1449 	dispatch_dequeue(rq, p);
1450 	return true;
1451 }
1452 
yield_task_scx(struct rq * rq)1453 static void yield_task_scx(struct rq *rq)
1454 {
1455 	struct scx_sched *sch = scx_root;
1456 	struct task_struct *p = rq->curr;
1457 
1458 	if (SCX_HAS_OP(sch, yield))
1459 		SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq, p, NULL);
1460 	else
1461 		p->scx.slice = 0;
1462 }
1463 
yield_to_task_scx(struct rq * rq,struct task_struct * to)1464 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
1465 {
1466 	struct scx_sched *sch = scx_root;
1467 	struct task_struct *from = rq->curr;
1468 
1469 	if (SCX_HAS_OP(sch, yield))
1470 		return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq,
1471 					      from, to);
1472 	else
1473 		return false;
1474 }
1475 
move_local_task_to_local_dsq(struct task_struct * p,u64 enq_flags,struct scx_dispatch_q * src_dsq,struct rq * dst_rq)1476 static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
1477 					 struct scx_dispatch_q *src_dsq,
1478 					 struct rq *dst_rq)
1479 {
1480 	struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
1481 
1482 	/* @dsq is locked and @p is on @dst_rq */
1483 	lockdep_assert_held(&src_dsq->lock);
1484 	lockdep_assert_rq_held(dst_rq);
1485 
1486 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
1487 
1488 	if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
1489 		list_add(&p->scx.dsq_list.node, &dst_dsq->list);
1490 	else
1491 		list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
1492 
1493 	dsq_mod_nr(dst_dsq, 1);
1494 	p->scx.dsq = dst_dsq;
1495 }
1496 
1497 /**
1498  * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
1499  * @p: task to move
1500  * @enq_flags: %SCX_ENQ_*
1501  * @src_rq: rq to move the task from, locked on entry, released on return
1502  * @dst_rq: rq to move the task into, locked on return
1503  *
1504  * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
1505  */
move_remote_task_to_local_dsq(struct task_struct * p,u64 enq_flags,struct rq * src_rq,struct rq * dst_rq)1506 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
1507 					  struct rq *src_rq, struct rq *dst_rq)
1508 {
1509 	lockdep_assert_rq_held(src_rq);
1510 
1511 	/* the following marks @p MIGRATING which excludes dequeue */
1512 	deactivate_task(src_rq, p, 0);
1513 	set_task_cpu(p, cpu_of(dst_rq));
1514 	p->scx.sticky_cpu = cpu_of(dst_rq);
1515 
1516 	raw_spin_rq_unlock(src_rq);
1517 	raw_spin_rq_lock(dst_rq);
1518 
1519 	/*
1520 	 * We want to pass scx-specific enq_flags but activate_task() will
1521 	 * truncate the upper 32 bit. As we own @rq, we can pass them through
1522 	 * @rq->scx.extra_enq_flags instead.
1523 	 */
1524 	WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
1525 	WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
1526 	dst_rq->scx.extra_enq_flags = enq_flags;
1527 	activate_task(dst_rq, p, 0);
1528 	dst_rq->scx.extra_enq_flags = 0;
1529 }
1530 
1531 /*
1532  * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
1533  * differences:
1534  *
1535  * - is_cpu_allowed() asks "Can this task run on this CPU?" while
1536  *   task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
1537  *   this CPU?".
1538  *
1539  *   While migration is disabled, is_cpu_allowed() has to say "yes" as the task
1540  *   must be allowed to finish on the CPU that it's currently on regardless of
1541  *   the CPU state. However, task_can_run_on_remote_rq() must say "no" as the
1542  *   BPF scheduler shouldn't attempt to migrate a task which has migration
1543  *   disabled.
1544  *
1545  * - The BPF scheduler is bypassed while the rq is offline and we can always say
1546  *   no to the BPF scheduler initiated migrations while offline.
1547  *
1548  * The caller must ensure that @p and @rq are on different CPUs.
1549  */
task_can_run_on_remote_rq(struct scx_sched * sch,struct task_struct * p,struct rq * rq,bool enforce)1550 static bool task_can_run_on_remote_rq(struct scx_sched *sch,
1551 				      struct task_struct *p, struct rq *rq,
1552 				      bool enforce)
1553 {
1554 	int cpu = cpu_of(rq);
1555 
1556 	WARN_ON_ONCE(task_cpu(p) == cpu);
1557 
1558 	/*
1559 	 * If @p has migration disabled, @p->cpus_ptr is updated to contain only
1560 	 * the pinned CPU in migrate_disable_switch() while @p is being switched
1561 	 * out. However, put_prev_task_scx() is called before @p->cpus_ptr is
1562 	 * updated and thus another CPU may see @p on a DSQ inbetween leading to
1563 	 * @p passing the below task_allowed_on_cpu() check while migration is
1564 	 * disabled.
1565 	 *
1566 	 * Test the migration disabled state first as the race window is narrow
1567 	 * and the BPF scheduler failing to check migration disabled state can
1568 	 * easily be masked if task_allowed_on_cpu() is done first.
1569 	 */
1570 	if (unlikely(is_migration_disabled(p))) {
1571 		if (enforce)
1572 			scx_error(sch, "SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d",
1573 				  p->comm, p->pid, task_cpu(p), cpu);
1574 		return false;
1575 	}
1576 
1577 	/*
1578 	 * We don't require the BPF scheduler to avoid dispatching to offline
1579 	 * CPUs mostly for convenience but also because CPUs can go offline
1580 	 * between scx_bpf_dsq_insert() calls and here. Trigger error iff the
1581 	 * picked CPU is outside the allowed mask.
1582 	 */
1583 	if (!task_allowed_on_cpu(p, cpu)) {
1584 		if (enforce)
1585 			scx_error(sch, "SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]",
1586 				  cpu, p->comm, p->pid);
1587 		return false;
1588 	}
1589 
1590 	if (!scx_rq_online(rq)) {
1591 		if (enforce)
1592 			__scx_add_event(sch, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE, 1);
1593 		return false;
1594 	}
1595 
1596 	return true;
1597 }
1598 
1599 /**
1600  * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
1601  * @p: target task
1602  * @dsq: locked DSQ @p is currently on
1603  * @src_rq: rq @p is currently on, stable with @dsq locked
1604  *
1605  * Called with @dsq locked but no rq's locked. We want to move @p to a different
1606  * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is
1607  * required when transferring into a local DSQ. Even when transferring into a
1608  * non-local DSQ, it's better to use the same mechanism to protect against
1609  * dequeues and maintain the invariant that @p->scx.dsq can only change while
1610  * @src_rq is locked, which e.g. scx_dump_task() depends on.
1611  *
1612  * We want to grab @src_rq but that can deadlock if we try while locking @dsq,
1613  * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As
1614  * this may race with dequeue, which can't drop the rq lock or fail, do a little
1615  * dancing from our side.
1616  *
1617  * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
1618  * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu
1619  * would be cleared to -1. While other cpus may have updated it to different
1620  * values afterwards, as this operation can't be preempted or recurse, the
1621  * holding_cpu can never become this CPU again before we're done. Thus, we can
1622  * tell whether we lost to dequeue by testing whether the holding_cpu still
1623  * points to this CPU. See dispatch_dequeue() for the counterpart.
1624  *
1625  * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is
1626  * still valid. %false if lost to dequeue.
1627  */
unlink_dsq_and_lock_src_rq(struct task_struct * p,struct scx_dispatch_q * dsq,struct rq * src_rq)1628 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p,
1629 				       struct scx_dispatch_q *dsq,
1630 				       struct rq *src_rq)
1631 {
1632 	s32 cpu = raw_smp_processor_id();
1633 
1634 	lockdep_assert_held(&dsq->lock);
1635 
1636 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
1637 	task_unlink_from_dsq(p, dsq);
1638 	p->scx.holding_cpu = cpu;
1639 
1640 	raw_spin_unlock(&dsq->lock);
1641 	raw_spin_rq_lock(src_rq);
1642 
1643 	/* task_rq couldn't have changed if we're still the holding cpu */
1644 	return likely(p->scx.holding_cpu == cpu) &&
1645 		!WARN_ON_ONCE(src_rq != task_rq(p));
1646 }
1647 
consume_remote_task(struct rq * this_rq,struct task_struct * p,struct scx_dispatch_q * dsq,struct rq * src_rq)1648 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
1649 				struct scx_dispatch_q *dsq, struct rq *src_rq)
1650 {
1651 	raw_spin_rq_unlock(this_rq);
1652 
1653 	if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
1654 		move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
1655 		return true;
1656 	} else {
1657 		raw_spin_rq_unlock(src_rq);
1658 		raw_spin_rq_lock(this_rq);
1659 		return false;
1660 	}
1661 }
1662 
1663 /**
1664  * move_task_between_dsqs() - Move a task from one DSQ to another
1665  * @sch: scx_sched being operated on
1666  * @p: target task
1667  * @enq_flags: %SCX_ENQ_*
1668  * @src_dsq: DSQ @p is currently on, must not be a local DSQ
1669  * @dst_dsq: DSQ @p is being moved to, can be any DSQ
1670  *
1671  * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local
1672  * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq
1673  * will change. As @p's task_rq is locked, this function doesn't need to use the
1674  * holding_cpu mechanism.
1675  *
1676  * On return, @src_dsq is unlocked and only @p's new task_rq, which is the
1677  * return value, is locked.
1678  */
move_task_between_dsqs(struct scx_sched * sch,struct task_struct * p,u64 enq_flags,struct scx_dispatch_q * src_dsq,struct scx_dispatch_q * dst_dsq)1679 static struct rq *move_task_between_dsqs(struct scx_sched *sch,
1680 					 struct task_struct *p, u64 enq_flags,
1681 					 struct scx_dispatch_q *src_dsq,
1682 					 struct scx_dispatch_q *dst_dsq)
1683 {
1684 	struct rq *src_rq = task_rq(p), *dst_rq;
1685 
1686 	BUG_ON(src_dsq->id == SCX_DSQ_LOCAL);
1687 	lockdep_assert_held(&src_dsq->lock);
1688 	lockdep_assert_rq_held(src_rq);
1689 
1690 	if (dst_dsq->id == SCX_DSQ_LOCAL) {
1691 		dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
1692 		if (src_rq != dst_rq &&
1693 		    unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
1694 			dst_dsq = find_global_dsq(sch, p);
1695 			dst_rq = src_rq;
1696 		}
1697 	} else {
1698 		/* no need to migrate if destination is a non-local DSQ */
1699 		dst_rq = src_rq;
1700 	}
1701 
1702 	/*
1703 	 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
1704 	 * CPU, @p will be migrated.
1705 	 */
1706 	if (dst_dsq->id == SCX_DSQ_LOCAL) {
1707 		/* @p is going from a non-local DSQ to a local DSQ */
1708 		if (src_rq == dst_rq) {
1709 			task_unlink_from_dsq(p, src_dsq);
1710 			move_local_task_to_local_dsq(p, enq_flags,
1711 						     src_dsq, dst_rq);
1712 			raw_spin_unlock(&src_dsq->lock);
1713 		} else {
1714 			raw_spin_unlock(&src_dsq->lock);
1715 			move_remote_task_to_local_dsq(p, enq_flags,
1716 						      src_rq, dst_rq);
1717 		}
1718 	} else {
1719 		/*
1720 		 * @p is going from a non-local DSQ to a non-local DSQ. As
1721 		 * $src_dsq is already locked, do an abbreviated dequeue.
1722 		 */
1723 		task_unlink_from_dsq(p, src_dsq);
1724 		p->scx.dsq = NULL;
1725 		raw_spin_unlock(&src_dsq->lock);
1726 
1727 		dispatch_enqueue(sch, dst_dsq, p, enq_flags);
1728 	}
1729 
1730 	return dst_rq;
1731 }
1732 
1733 /*
1734  * A poorly behaving BPF scheduler can live-lock the system by e.g. incessantly
1735  * banging on the same DSQ on a large NUMA system to the point where switching
1736  * to the bypass mode can take a long time. Inject artificial delays while the
1737  * bypass mode is switching to guarantee timely completion.
1738  */
scx_breather(struct rq * rq)1739 static void scx_breather(struct rq *rq)
1740 {
1741 	u64 until;
1742 
1743 	lockdep_assert_rq_held(rq);
1744 
1745 	if (likely(!atomic_read(&scx_breather_depth)))
1746 		return;
1747 
1748 	raw_spin_rq_unlock(rq);
1749 
1750 	until = ktime_get_ns() + NSEC_PER_MSEC;
1751 
1752 	do {
1753 		int cnt = 1024;
1754 		while (atomic_read(&scx_breather_depth) && --cnt)
1755 			cpu_relax();
1756 	} while (atomic_read(&scx_breather_depth) &&
1757 		 time_before64(ktime_get_ns(), until));
1758 
1759 	raw_spin_rq_lock(rq);
1760 }
1761 
consume_dispatch_q(struct scx_sched * sch,struct rq * rq,struct scx_dispatch_q * dsq)1762 static bool consume_dispatch_q(struct scx_sched *sch, struct rq *rq,
1763 			       struct scx_dispatch_q *dsq)
1764 {
1765 	struct task_struct *p;
1766 retry:
1767 	/*
1768 	 * This retry loop can repeatedly race against scx_bypass() dequeueing
1769 	 * tasks from @dsq trying to put the system into the bypass mode. On
1770 	 * some multi-socket machines (e.g. 2x Intel 8480c), this can live-lock
1771 	 * the machine into soft lockups. Give a breather.
1772 	 */
1773 	scx_breather(rq);
1774 
1775 	/*
1776 	 * The caller can't expect to successfully consume a task if the task's
1777 	 * addition to @dsq isn't guaranteed to be visible somehow. Test
1778 	 * @dsq->list without locking and skip if it seems empty.
1779 	 */
1780 	if (list_empty(&dsq->list))
1781 		return false;
1782 
1783 	raw_spin_lock(&dsq->lock);
1784 
1785 	nldsq_for_each_task(p, dsq) {
1786 		struct rq *task_rq = task_rq(p);
1787 
1788 		if (rq == task_rq) {
1789 			task_unlink_from_dsq(p, dsq);
1790 			move_local_task_to_local_dsq(p, 0, dsq, rq);
1791 			raw_spin_unlock(&dsq->lock);
1792 			return true;
1793 		}
1794 
1795 		if (task_can_run_on_remote_rq(sch, p, rq, false)) {
1796 			if (likely(consume_remote_task(rq, p, dsq, task_rq)))
1797 				return true;
1798 			goto retry;
1799 		}
1800 	}
1801 
1802 	raw_spin_unlock(&dsq->lock);
1803 	return false;
1804 }
1805 
consume_global_dsq(struct scx_sched * sch,struct rq * rq)1806 static bool consume_global_dsq(struct scx_sched *sch, struct rq *rq)
1807 {
1808 	int node = cpu_to_node(cpu_of(rq));
1809 
1810 	return consume_dispatch_q(sch, rq, sch->global_dsqs[node]);
1811 }
1812 
1813 /**
1814  * dispatch_to_local_dsq - Dispatch a task to a local dsq
1815  * @sch: scx_sched being operated on
1816  * @rq: current rq which is locked
1817  * @dst_dsq: destination DSQ
1818  * @p: task to dispatch
1819  * @enq_flags: %SCX_ENQ_*
1820  *
1821  * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
1822  * DSQ. This function performs all the synchronization dancing needed because
1823  * local DSQs are protected with rq locks.
1824  *
1825  * The caller must have exclusive ownership of @p (e.g. through
1826  * %SCX_OPSS_DISPATCHING).
1827  */
dispatch_to_local_dsq(struct scx_sched * sch,struct rq * rq,struct scx_dispatch_q * dst_dsq,struct task_struct * p,u64 enq_flags)1828 static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
1829 				  struct scx_dispatch_q *dst_dsq,
1830 				  struct task_struct *p, u64 enq_flags)
1831 {
1832 	struct rq *src_rq = task_rq(p);
1833 	struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
1834 	struct rq *locked_rq = rq;
1835 
1836 	/*
1837 	 * We're synchronized against dequeue through DISPATCHING. As @p can't
1838 	 * be dequeued, its task_rq and cpus_allowed are stable too.
1839 	 *
1840 	 * If dispatching to @rq that @p is already on, no lock dancing needed.
1841 	 */
1842 	if (rq == src_rq && rq == dst_rq) {
1843 		dispatch_enqueue(sch, dst_dsq, p,
1844 				 enq_flags | SCX_ENQ_CLEAR_OPSS);
1845 		return;
1846 	}
1847 
1848 	if (src_rq != dst_rq &&
1849 	    unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
1850 		dispatch_enqueue(sch, find_global_dsq(sch, p), p,
1851 				 enq_flags | SCX_ENQ_CLEAR_OPSS);
1852 		return;
1853 	}
1854 
1855 	/*
1856 	 * @p is on a possibly remote @src_rq which we need to lock to move the
1857 	 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
1858 	 * on DISPATCHING, so we can't grab @src_rq lock while holding
1859 	 * DISPATCHING.
1860 	 *
1861 	 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
1862 	 * we're moving from a DSQ and use the same mechanism - mark the task
1863 	 * under transfer with holding_cpu, release DISPATCHING and then follow
1864 	 * the same protocol. See unlink_dsq_and_lock_src_rq().
1865 	 */
1866 	p->scx.holding_cpu = raw_smp_processor_id();
1867 
1868 	/* store_release ensures that dequeue sees the above */
1869 	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1870 
1871 	/* switch to @src_rq lock */
1872 	if (locked_rq != src_rq) {
1873 		raw_spin_rq_unlock(locked_rq);
1874 		locked_rq = src_rq;
1875 		raw_spin_rq_lock(src_rq);
1876 	}
1877 
1878 	/* task_rq couldn't have changed if we're still the holding cpu */
1879 	if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
1880 	    !WARN_ON_ONCE(src_rq != task_rq(p))) {
1881 		/*
1882 		 * If @p is staying on the same rq, there's no need to go
1883 		 * through the full deactivate/activate cycle. Optimize by
1884 		 * abbreviating move_remote_task_to_local_dsq().
1885 		 */
1886 		if (src_rq == dst_rq) {
1887 			p->scx.holding_cpu = -1;
1888 			dispatch_enqueue(sch, &dst_rq->scx.local_dsq, p,
1889 					 enq_flags);
1890 		} else {
1891 			move_remote_task_to_local_dsq(p, enq_flags,
1892 						      src_rq, dst_rq);
1893 			/* task has been moved to dst_rq, which is now locked */
1894 			locked_rq = dst_rq;
1895 		}
1896 
1897 		/* if the destination CPU is idle, wake it up */
1898 		if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
1899 			resched_curr(dst_rq);
1900 	}
1901 
1902 	/* switch back to @rq lock */
1903 	if (locked_rq != rq) {
1904 		raw_spin_rq_unlock(locked_rq);
1905 		raw_spin_rq_lock(rq);
1906 	}
1907 }
1908 
1909 /**
1910  * finish_dispatch - Asynchronously finish dispatching a task
1911  * @rq: current rq which is locked
1912  * @p: task to finish dispatching
1913  * @qseq_at_dispatch: qseq when @p started getting dispatched
1914  * @dsq_id: destination DSQ ID
1915  * @enq_flags: %SCX_ENQ_*
1916  *
1917  * Dispatching to local DSQs may need to wait for queueing to complete or
1918  * require rq lock dancing. As we don't wanna do either while inside
1919  * ops.dispatch() to avoid locking order inversion, we split dispatching into
1920  * two parts. scx_bpf_dsq_insert() which is called by ops.dispatch() records the
1921  * task and its qseq. Once ops.dispatch() returns, this function is called to
1922  * finish up.
1923  *
1924  * There is no guarantee that @p is still valid for dispatching or even that it
1925  * was valid in the first place. Make sure that the task is still owned by the
1926  * BPF scheduler and claim the ownership before dispatching.
1927  */
finish_dispatch(struct scx_sched * sch,struct rq * rq,struct task_struct * p,unsigned long qseq_at_dispatch,u64 dsq_id,u64 enq_flags)1928 static void finish_dispatch(struct scx_sched *sch, struct rq *rq,
1929 			    struct task_struct *p,
1930 			    unsigned long qseq_at_dispatch,
1931 			    u64 dsq_id, u64 enq_flags)
1932 {
1933 	struct scx_dispatch_q *dsq;
1934 	unsigned long opss;
1935 
1936 	touch_core_sched_dispatch(rq, p);
1937 retry:
1938 	/*
1939 	 * No need for _acquire here. @p is accessed only after a successful
1940 	 * try_cmpxchg to DISPATCHING.
1941 	 */
1942 	opss = atomic_long_read(&p->scx.ops_state);
1943 
1944 	switch (opss & SCX_OPSS_STATE_MASK) {
1945 	case SCX_OPSS_DISPATCHING:
1946 	case SCX_OPSS_NONE:
1947 		/* someone else already got to it */
1948 		return;
1949 	case SCX_OPSS_QUEUED:
1950 		/*
1951 		 * If qseq doesn't match, @p has gone through at least one
1952 		 * dispatch/dequeue and re-enqueue cycle between
1953 		 * scx_bpf_dsq_insert() and here and we have no claim on it.
1954 		 */
1955 		if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
1956 			return;
1957 
1958 		/*
1959 		 * While we know @p is accessible, we don't yet have a claim on
1960 		 * it - the BPF scheduler is allowed to dispatch tasks
1961 		 * spuriously and there can be a racing dequeue attempt. Let's
1962 		 * claim @p by atomically transitioning it from QUEUED to
1963 		 * DISPATCHING.
1964 		 */
1965 		if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
1966 						   SCX_OPSS_DISPATCHING)))
1967 			break;
1968 		goto retry;
1969 	case SCX_OPSS_QUEUEING:
1970 		/*
1971 		 * do_enqueue_task() is in the process of transferring the task
1972 		 * to the BPF scheduler while holding @p's rq lock. As we aren't
1973 		 * holding any kernel or BPF resource that the enqueue path may
1974 		 * depend upon, it's safe to wait.
1975 		 */
1976 		wait_ops_state(p, opss);
1977 		goto retry;
1978 	}
1979 
1980 	BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
1981 
1982 	dsq = find_dsq_for_dispatch(sch, this_rq(), dsq_id, p);
1983 
1984 	if (dsq->id == SCX_DSQ_LOCAL)
1985 		dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags);
1986 	else
1987 		dispatch_enqueue(sch, dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
1988 }
1989 
flush_dispatch_buf(struct scx_sched * sch,struct rq * rq)1990 static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq)
1991 {
1992 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
1993 	u32 u;
1994 
1995 	for (u = 0; u < dspc->cursor; u++) {
1996 		struct scx_dsp_buf_ent *ent = &dspc->buf[u];
1997 
1998 		finish_dispatch(sch, rq, ent->task, ent->qseq, ent->dsq_id,
1999 				ent->enq_flags);
2000 	}
2001 
2002 	dspc->nr_tasks += dspc->cursor;
2003 	dspc->cursor = 0;
2004 }
2005 
balance_one(struct rq * rq,struct task_struct * prev)2006 static int balance_one(struct rq *rq, struct task_struct *prev)
2007 {
2008 	struct scx_sched *sch = scx_root;
2009 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2010 	bool prev_on_scx = prev->sched_class == &ext_sched_class;
2011 	bool prev_on_rq = prev->scx.flags & SCX_TASK_QUEUED;
2012 	int nr_loops = SCX_DSP_MAX_LOOPS;
2013 
2014 	lockdep_assert_rq_held(rq);
2015 	rq->scx.flags |= SCX_RQ_IN_BALANCE;
2016 	rq->scx.flags &= ~(SCX_RQ_BAL_PENDING | SCX_RQ_BAL_KEEP);
2017 
2018 	if ((sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT) &&
2019 	    unlikely(rq->scx.cpu_released)) {
2020 		/*
2021 		 * If the previous sched_class for the current CPU was not SCX,
2022 		 * notify the BPF scheduler that it again has control of the
2023 		 * core. This callback complements ->cpu_release(), which is
2024 		 * emitted in switch_class().
2025 		 */
2026 		if (SCX_HAS_OP(sch, cpu_acquire))
2027 			SCX_CALL_OP(sch, SCX_KF_REST, cpu_acquire, rq,
2028 				    cpu_of(rq), NULL);
2029 		rq->scx.cpu_released = false;
2030 	}
2031 
2032 	if (prev_on_scx) {
2033 		update_curr_scx(rq);
2034 
2035 		/*
2036 		 * If @prev is runnable & has slice left, it has priority and
2037 		 * fetching more just increases latency for the fetched tasks.
2038 		 * Tell pick_task_scx() to keep running @prev. If the BPF
2039 		 * scheduler wants to handle this explicitly, it should
2040 		 * implement ->cpu_release().
2041 		 *
2042 		 * See scx_disable_workfn() for the explanation on the bypassing
2043 		 * test.
2044 		 */
2045 		if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) {
2046 			rq->scx.flags |= SCX_RQ_BAL_KEEP;
2047 			goto has_tasks;
2048 		}
2049 	}
2050 
2051 	/* if there already are tasks to run, nothing to do */
2052 	if (rq->scx.local_dsq.nr)
2053 		goto has_tasks;
2054 
2055 	if (consume_global_dsq(sch, rq))
2056 		goto has_tasks;
2057 
2058 	if (unlikely(!SCX_HAS_OP(sch, dispatch)) ||
2059 	    scx_rq_bypassing(rq) || !scx_rq_online(rq))
2060 		goto no_tasks;
2061 
2062 	dspc->rq = rq;
2063 
2064 	/*
2065 	 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock,
2066 	 * the local DSQ might still end up empty after a successful
2067 	 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch()
2068 	 * produced some tasks, retry. The BPF scheduler may depend on this
2069 	 * looping behavior to simplify its implementation.
2070 	 */
2071 	do {
2072 		dspc->nr_tasks = 0;
2073 
2074 		SCX_CALL_OP(sch, SCX_KF_DISPATCH, dispatch, rq,
2075 			    cpu_of(rq), prev_on_scx ? prev : NULL);
2076 
2077 		flush_dispatch_buf(sch, rq);
2078 
2079 		if (prev_on_rq && prev->scx.slice) {
2080 			rq->scx.flags |= SCX_RQ_BAL_KEEP;
2081 			goto has_tasks;
2082 		}
2083 		if (rq->scx.local_dsq.nr)
2084 			goto has_tasks;
2085 		if (consume_global_dsq(sch, rq))
2086 			goto has_tasks;
2087 
2088 		/*
2089 		 * ops.dispatch() can trap us in this loop by repeatedly
2090 		 * dispatching ineligible tasks. Break out once in a while to
2091 		 * allow the watchdog to run. As IRQ can't be enabled in
2092 		 * balance(), we want to complete this scheduling cycle and then
2093 		 * start a new one. IOW, we want to call resched_curr() on the
2094 		 * next, most likely idle, task, not the current one. Use
2095 		 * scx_kick_cpu() for deferred kicking.
2096 		 */
2097 		if (unlikely(!--nr_loops)) {
2098 			scx_kick_cpu(sch, cpu_of(rq), 0);
2099 			break;
2100 		}
2101 	} while (dspc->nr_tasks);
2102 
2103 no_tasks:
2104 	/*
2105 	 * Didn't find another task to run. Keep running @prev unless
2106 	 * %SCX_OPS_ENQ_LAST is in effect.
2107 	 */
2108 	if (prev_on_rq &&
2109 	    (!(sch->ops.flags & SCX_OPS_ENQ_LAST) || scx_rq_bypassing(rq))) {
2110 		rq->scx.flags |= SCX_RQ_BAL_KEEP;
2111 		__scx_add_event(sch, SCX_EV_DISPATCH_KEEP_LAST, 1);
2112 		goto has_tasks;
2113 	}
2114 	rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2115 	return false;
2116 
2117 has_tasks:
2118 	rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2119 	return true;
2120 }
2121 
balance_scx(struct rq * rq,struct task_struct * prev,struct rq_flags * rf)2122 static int balance_scx(struct rq *rq, struct task_struct *prev,
2123 		       struct rq_flags *rf)
2124 {
2125 	int ret;
2126 
2127 	rq_unpin_lock(rq, rf);
2128 
2129 	ret = balance_one(rq, prev);
2130 
2131 #ifdef CONFIG_SCHED_SMT
2132 	/*
2133 	 * When core-sched is enabled, this ops.balance() call will be followed
2134 	 * by pick_task_scx() on this CPU and the SMT siblings. Balance the
2135 	 * siblings too.
2136 	 */
2137 	if (sched_core_enabled(rq)) {
2138 		const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
2139 		int scpu;
2140 
2141 		for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) {
2142 			struct rq *srq = cpu_rq(scpu);
2143 			struct task_struct *sprev = srq->curr;
2144 
2145 			WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq));
2146 			update_rq_clock(srq);
2147 			balance_one(srq, sprev);
2148 		}
2149 	}
2150 #endif
2151 	rq_repin_lock(rq, rf);
2152 
2153 	return ret;
2154 }
2155 
process_ddsp_deferred_locals(struct rq * rq)2156 static void process_ddsp_deferred_locals(struct rq *rq)
2157 {
2158 	struct task_struct *p;
2159 
2160 	lockdep_assert_rq_held(rq);
2161 
2162 	/*
2163 	 * Now that @rq can be unlocked, execute the deferred enqueueing of
2164 	 * tasks directly dispatched to the local DSQs of other CPUs. See
2165 	 * direct_dispatch(). Keep popping from the head instead of using
2166 	 * list_for_each_entry_safe() as dispatch_local_dsq() may unlock @rq
2167 	 * temporarily.
2168 	 */
2169 	while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
2170 				struct task_struct, scx.dsq_list.node))) {
2171 		struct scx_sched *sch = scx_root;
2172 		struct scx_dispatch_q *dsq;
2173 
2174 		list_del_init(&p->scx.dsq_list.node);
2175 
2176 		dsq = find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
2177 		if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
2178 			dispatch_to_local_dsq(sch, rq, dsq, p,
2179 					      p->scx.ddsp_enq_flags);
2180 	}
2181 }
2182 
set_next_task_scx(struct rq * rq,struct task_struct * p,bool first)2183 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
2184 {
2185 	struct scx_sched *sch = scx_root;
2186 
2187 	if (p->scx.flags & SCX_TASK_QUEUED) {
2188 		/*
2189 		 * Core-sched might decide to execute @p before it is
2190 		 * dispatched. Call ops_dequeue() to notify the BPF scheduler.
2191 		 */
2192 		ops_dequeue(rq, p, SCX_DEQ_CORE_SCHED_EXEC);
2193 		dispatch_dequeue(rq, p);
2194 	}
2195 
2196 	p->se.exec_start = rq_clock_task(rq);
2197 
2198 	/* see dequeue_task_scx() on why we skip when !QUEUED */
2199 	if (SCX_HAS_OP(sch, running) && (p->scx.flags & SCX_TASK_QUEUED))
2200 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, running, rq, p);
2201 
2202 	clr_task_runnable(p, true);
2203 
2204 	/*
2205 	 * @p is getting newly scheduled or got kicked after someone updated its
2206 	 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
2207 	 */
2208 	if ((p->scx.slice == SCX_SLICE_INF) !=
2209 	    (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
2210 		if (p->scx.slice == SCX_SLICE_INF)
2211 			rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
2212 		else
2213 			rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
2214 
2215 		sched_update_tick_dependency(rq);
2216 
2217 		/*
2218 		 * For now, let's refresh the load_avgs just when transitioning
2219 		 * in and out of nohz. In the future, we might want to add a
2220 		 * mechanism which calls the following periodically on
2221 		 * tick-stopped CPUs.
2222 		 */
2223 		update_other_load_avgs(rq);
2224 	}
2225 }
2226 
2227 static enum scx_cpu_preempt_reason
preempt_reason_from_class(const struct sched_class * class)2228 preempt_reason_from_class(const struct sched_class *class)
2229 {
2230 	if (class == &stop_sched_class)
2231 		return SCX_CPU_PREEMPT_STOP;
2232 	if (class == &dl_sched_class)
2233 		return SCX_CPU_PREEMPT_DL;
2234 	if (class == &rt_sched_class)
2235 		return SCX_CPU_PREEMPT_RT;
2236 	return SCX_CPU_PREEMPT_UNKNOWN;
2237 }
2238 
switch_class(struct rq * rq,struct task_struct * next)2239 static void switch_class(struct rq *rq, struct task_struct *next)
2240 {
2241 	struct scx_sched *sch = scx_root;
2242 	const struct sched_class *next_class = next->sched_class;
2243 
2244 	/*
2245 	 * Pairs with the smp_load_acquire() issued by a CPU in
2246 	 * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
2247 	 * resched.
2248 	 */
2249 	smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
2250 	if (!(sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT))
2251 		return;
2252 
2253 	/*
2254 	 * The callback is conceptually meant to convey that the CPU is no
2255 	 * longer under the control of SCX. Therefore, don't invoke the callback
2256 	 * if the next class is below SCX (in which case the BPF scheduler has
2257 	 * actively decided not to schedule any tasks on the CPU).
2258 	 */
2259 	if (sched_class_above(&ext_sched_class, next_class))
2260 		return;
2261 
2262 	/*
2263 	 * At this point we know that SCX was preempted by a higher priority
2264 	 * sched_class, so invoke the ->cpu_release() callback if we have not
2265 	 * done so already. We only send the callback once between SCX being
2266 	 * preempted, and it regaining control of the CPU.
2267 	 *
2268 	 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
2269 	 *  next time that balance_scx() is invoked.
2270 	 */
2271 	if (!rq->scx.cpu_released) {
2272 		if (SCX_HAS_OP(sch, cpu_release)) {
2273 			struct scx_cpu_release_args args = {
2274 				.reason = preempt_reason_from_class(next_class),
2275 				.task = next,
2276 			};
2277 
2278 			SCX_CALL_OP(sch, SCX_KF_CPU_RELEASE, cpu_release, rq,
2279 				    cpu_of(rq), &args);
2280 		}
2281 		rq->scx.cpu_released = true;
2282 	}
2283 }
2284 
put_prev_task_scx(struct rq * rq,struct task_struct * p,struct task_struct * next)2285 static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
2286 			      struct task_struct *next)
2287 {
2288 	struct scx_sched *sch = scx_root;
2289 	update_curr_scx(rq);
2290 
2291 	/* see dequeue_task_scx() on why we skip when !QUEUED */
2292 	if (SCX_HAS_OP(sch, stopping) && (p->scx.flags & SCX_TASK_QUEUED))
2293 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, true);
2294 
2295 	if (p->scx.flags & SCX_TASK_QUEUED) {
2296 		set_task_runnable(rq, p);
2297 
2298 		/*
2299 		 * If @p has slice left and is being put, @p is getting
2300 		 * preempted by a higher priority scheduler class or core-sched
2301 		 * forcing a different task. Leave it at the head of the local
2302 		 * DSQ.
2303 		 */
2304 		if (p->scx.slice && !scx_rq_bypassing(rq)) {
2305 			dispatch_enqueue(sch, &rq->scx.local_dsq, p,
2306 					 SCX_ENQ_HEAD);
2307 			goto switch_class;
2308 		}
2309 
2310 		/*
2311 		 * If @p is runnable but we're about to enter a lower
2312 		 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell
2313 		 * ops.enqueue() that @p is the only one available for this cpu,
2314 		 * which should trigger an explicit follow-up scheduling event.
2315 		 */
2316 		if (sched_class_above(&ext_sched_class, next->sched_class)) {
2317 			WARN_ON_ONCE(!(sch->ops.flags & SCX_OPS_ENQ_LAST));
2318 			do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
2319 		} else {
2320 			do_enqueue_task(rq, p, 0, -1);
2321 		}
2322 	}
2323 
2324 switch_class:
2325 	if (next && next->sched_class != &ext_sched_class)
2326 		switch_class(rq, next);
2327 }
2328 
first_local_task(struct rq * rq)2329 static struct task_struct *first_local_task(struct rq *rq)
2330 {
2331 	return list_first_entry_or_null(&rq->scx.local_dsq.list,
2332 					struct task_struct, scx.dsq_list.node);
2333 }
2334 
pick_task_scx(struct rq * rq)2335 static struct task_struct *pick_task_scx(struct rq *rq)
2336 {
2337 	struct task_struct *prev = rq->curr;
2338 	struct task_struct *p;
2339 	bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
2340 	bool kick_idle = false;
2341 
2342 	/*
2343 	 * WORKAROUND:
2344 	 *
2345 	 * %SCX_RQ_BAL_KEEP should be set iff $prev is on SCX as it must just
2346 	 * have gone through balance_scx(). Unfortunately, there currently is a
2347 	 * bug where fair could say yes on balance() but no on pick_task(),
2348 	 * which then ends up calling pick_task_scx() without preceding
2349 	 * balance_scx().
2350 	 *
2351 	 * Keep running @prev if possible and avoid stalling from entering idle
2352 	 * without balancing.
2353 	 *
2354 	 * Once fair is fixed, remove the workaround and trigger WARN_ON_ONCE()
2355 	 * if pick_task_scx() is called without preceding balance_scx().
2356 	 */
2357 	if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) {
2358 		if (prev->scx.flags & SCX_TASK_QUEUED) {
2359 			keep_prev = true;
2360 		} else {
2361 			keep_prev = false;
2362 			kick_idle = true;
2363 		}
2364 	} else if (unlikely(keep_prev &&
2365 			    prev->sched_class != &ext_sched_class)) {
2366 		/*
2367 		 * Can happen while enabling as SCX_RQ_BAL_PENDING assertion is
2368 		 * conditional on scx_enabled() and may have been skipped.
2369 		 */
2370 		WARN_ON_ONCE(scx_enable_state() == SCX_ENABLED);
2371 		keep_prev = false;
2372 	}
2373 
2374 	/*
2375 	 * If balance_scx() is telling us to keep running @prev, replenish slice
2376 	 * if necessary and keep running @prev. Otherwise, pop the first one
2377 	 * from the local DSQ.
2378 	 */
2379 	if (keep_prev) {
2380 		p = prev;
2381 		if (!p->scx.slice)
2382 			refill_task_slice_dfl(rcu_dereference_sched(scx_root), p);
2383 	} else {
2384 		p = first_local_task(rq);
2385 		if (!p) {
2386 			if (kick_idle)
2387 				scx_kick_cpu(rcu_dereference_sched(scx_root),
2388 					     cpu_of(rq), SCX_KICK_IDLE);
2389 			return NULL;
2390 		}
2391 
2392 		if (unlikely(!p->scx.slice)) {
2393 			struct scx_sched *sch = rcu_dereference_sched(scx_root);
2394 
2395 			if (!scx_rq_bypassing(rq) && !sch->warned_zero_slice) {
2396 				printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n",
2397 						p->comm, p->pid, __func__);
2398 				sch->warned_zero_slice = true;
2399 			}
2400 			refill_task_slice_dfl(sch, p);
2401 		}
2402 	}
2403 
2404 	return p;
2405 }
2406 
2407 #ifdef CONFIG_SCHED_CORE
2408 /**
2409  * scx_prio_less - Task ordering for core-sched
2410  * @a: task A
2411  * @b: task B
2412  * @in_fi: in forced idle state
2413  *
2414  * Core-sched is implemented as an additional scheduling layer on top of the
2415  * usual sched_class'es and needs to find out the expected task ordering. For
2416  * SCX, core-sched calls this function to interrogate the task ordering.
2417  *
2418  * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
2419  * to implement the default task ordering. The older the timestamp, the higher
2420  * priority the task - the global FIFO ordering matching the default scheduling
2421  * behavior.
2422  *
2423  * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
2424  * implement FIFO ordering within each local DSQ. See pick_task_scx().
2425  */
scx_prio_less(const struct task_struct * a,const struct task_struct * b,bool in_fi)2426 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
2427 		   bool in_fi)
2428 {
2429 	struct scx_sched *sch = scx_root;
2430 
2431 	/*
2432 	 * The const qualifiers are dropped from task_struct pointers when
2433 	 * calling ops.core_sched_before(). Accesses are controlled by the
2434 	 * verifier.
2435 	 */
2436 	if (SCX_HAS_OP(sch, core_sched_before) &&
2437 	    !scx_rq_bypassing(task_rq(a)))
2438 		return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, core_sched_before,
2439 					      NULL,
2440 					      (struct task_struct *)a,
2441 					      (struct task_struct *)b);
2442 	else
2443 		return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
2444 }
2445 #endif	/* CONFIG_SCHED_CORE */
2446 
select_task_rq_scx(struct task_struct * p,int prev_cpu,int wake_flags)2447 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
2448 {
2449 	struct scx_sched *sch = scx_root;
2450 	bool rq_bypass;
2451 
2452 	/*
2453 	 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it
2454 	 * can be a good migration opportunity with low cache and memory
2455 	 * footprint. Returning a CPU different than @prev_cpu triggers
2456 	 * immediate rq migration. However, for SCX, as the current rq
2457 	 * association doesn't dictate where the task is going to run, this
2458 	 * doesn't fit well. If necessary, we can later add a dedicated method
2459 	 * which can decide to preempt self to force it through the regular
2460 	 * scheduling path.
2461 	 */
2462 	if (unlikely(wake_flags & WF_EXEC))
2463 		return prev_cpu;
2464 
2465 	rq_bypass = scx_rq_bypassing(task_rq(p));
2466 	if (likely(SCX_HAS_OP(sch, select_cpu)) && !rq_bypass) {
2467 		s32 cpu;
2468 		struct task_struct **ddsp_taskp;
2469 
2470 		ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
2471 		WARN_ON_ONCE(*ddsp_taskp);
2472 		*ddsp_taskp = p;
2473 
2474 		cpu = SCX_CALL_OP_TASK_RET(sch,
2475 					   SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
2476 					   select_cpu, NULL, p, prev_cpu,
2477 					   wake_flags);
2478 		p->scx.selected_cpu = cpu;
2479 		*ddsp_taskp = NULL;
2480 		if (ops_cpu_valid(sch, cpu, "from ops.select_cpu()"))
2481 			return cpu;
2482 		else
2483 			return prev_cpu;
2484 	} else {
2485 		s32 cpu;
2486 
2487 		cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0);
2488 		if (cpu >= 0) {
2489 			refill_task_slice_dfl(sch, p);
2490 			p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
2491 		} else {
2492 			cpu = prev_cpu;
2493 		}
2494 		p->scx.selected_cpu = cpu;
2495 
2496 		if (rq_bypass)
2497 			__scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);
2498 		return cpu;
2499 	}
2500 }
2501 
task_woken_scx(struct rq * rq,struct task_struct * p)2502 static void task_woken_scx(struct rq *rq, struct task_struct *p)
2503 {
2504 	run_deferred(rq);
2505 }
2506 
set_cpus_allowed_scx(struct task_struct * p,struct affinity_context * ac)2507 static void set_cpus_allowed_scx(struct task_struct *p,
2508 				 struct affinity_context *ac)
2509 {
2510 	struct scx_sched *sch = scx_root;
2511 
2512 	set_cpus_allowed_common(p, ac);
2513 
2514 	/*
2515 	 * The effective cpumask is stored in @p->cpus_ptr which may temporarily
2516 	 * differ from the configured one in @p->cpus_mask. Always tell the bpf
2517 	 * scheduler the effective one.
2518 	 *
2519 	 * Fine-grained memory write control is enforced by BPF making the const
2520 	 * designation pointless. Cast it away when calling the operation.
2521 	 */
2522 	if (SCX_HAS_OP(sch, set_cpumask))
2523 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, NULL,
2524 				 p, (struct cpumask *)p->cpus_ptr);
2525 }
2526 
handle_hotplug(struct rq * rq,bool online)2527 static void handle_hotplug(struct rq *rq, bool online)
2528 {
2529 	struct scx_sched *sch = scx_root;
2530 	int cpu = cpu_of(rq);
2531 
2532 	atomic_long_inc(&scx_hotplug_seq);
2533 
2534 	/*
2535 	 * scx_root updates are protected by cpus_read_lock() and will stay
2536 	 * stable here. Note that we can't depend on scx_enabled() test as the
2537 	 * hotplug ops need to be enabled before __scx_enabled is set.
2538 	 */
2539 	if (unlikely(!sch))
2540 		return;
2541 
2542 	if (scx_enabled())
2543 		scx_idle_update_selcpu_topology(&sch->ops);
2544 
2545 	if (online && SCX_HAS_OP(sch, cpu_online))
2546 		SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_online, NULL, cpu);
2547 	else if (!online && SCX_HAS_OP(sch, cpu_offline))
2548 		SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_offline, NULL, cpu);
2549 	else
2550 		scx_exit(sch, SCX_EXIT_UNREG_KERN,
2551 			 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
2552 			 "cpu %d going %s, exiting scheduler", cpu,
2553 			 online ? "online" : "offline");
2554 }
2555 
scx_rq_activate(struct rq * rq)2556 void scx_rq_activate(struct rq *rq)
2557 {
2558 	handle_hotplug(rq, true);
2559 }
2560 
scx_rq_deactivate(struct rq * rq)2561 void scx_rq_deactivate(struct rq *rq)
2562 {
2563 	handle_hotplug(rq, false);
2564 }
2565 
rq_online_scx(struct rq * rq)2566 static void rq_online_scx(struct rq *rq)
2567 {
2568 	rq->scx.flags |= SCX_RQ_ONLINE;
2569 }
2570 
rq_offline_scx(struct rq * rq)2571 static void rq_offline_scx(struct rq *rq)
2572 {
2573 	rq->scx.flags &= ~SCX_RQ_ONLINE;
2574 }
2575 
2576 
check_rq_for_timeouts(struct rq * rq)2577 static bool check_rq_for_timeouts(struct rq *rq)
2578 {
2579 	struct scx_sched *sch;
2580 	struct task_struct *p;
2581 	struct rq_flags rf;
2582 	bool timed_out = false;
2583 
2584 	rq_lock_irqsave(rq, &rf);
2585 	sch = rcu_dereference_bh(scx_root);
2586 	if (unlikely(!sch))
2587 		goto out_unlock;
2588 
2589 	list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
2590 		unsigned long last_runnable = p->scx.runnable_at;
2591 
2592 		if (unlikely(time_after(jiffies,
2593 					last_runnable + scx_watchdog_timeout))) {
2594 			u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
2595 
2596 			scx_exit(sch, SCX_EXIT_ERROR_STALL, 0,
2597 				 "%s[%d] failed to run for %u.%03us",
2598 				 p->comm, p->pid, dur_ms / 1000, dur_ms % 1000);
2599 			timed_out = true;
2600 			break;
2601 		}
2602 	}
2603 out_unlock:
2604 	rq_unlock_irqrestore(rq, &rf);
2605 	return timed_out;
2606 }
2607 
scx_watchdog_workfn(struct work_struct * work)2608 static void scx_watchdog_workfn(struct work_struct *work)
2609 {
2610 	int cpu;
2611 
2612 	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
2613 
2614 	for_each_online_cpu(cpu) {
2615 		if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
2616 			break;
2617 
2618 		cond_resched();
2619 	}
2620 	queue_delayed_work(system_unbound_wq, to_delayed_work(work),
2621 			   scx_watchdog_timeout / 2);
2622 }
2623 
scx_tick(struct rq * rq)2624 void scx_tick(struct rq *rq)
2625 {
2626 	struct scx_sched *sch;
2627 	unsigned long last_check;
2628 
2629 	if (!scx_enabled())
2630 		return;
2631 
2632 	sch = rcu_dereference_bh(scx_root);
2633 	if (unlikely(!sch))
2634 		return;
2635 
2636 	last_check = READ_ONCE(scx_watchdog_timestamp);
2637 	if (unlikely(time_after(jiffies,
2638 				last_check + READ_ONCE(scx_watchdog_timeout)))) {
2639 		u32 dur_ms = jiffies_to_msecs(jiffies - last_check);
2640 
2641 		scx_exit(sch, SCX_EXIT_ERROR_STALL, 0,
2642 			 "watchdog failed to check in for %u.%03us",
2643 			 dur_ms / 1000, dur_ms % 1000);
2644 	}
2645 
2646 	update_other_load_avgs(rq);
2647 }
2648 
task_tick_scx(struct rq * rq,struct task_struct * curr,int queued)2649 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
2650 {
2651 	struct scx_sched *sch = scx_root;
2652 
2653 	update_curr_scx(rq);
2654 
2655 	/*
2656 	 * While disabling, always resched and refresh core-sched timestamp as
2657 	 * we can't trust the slice management or ops.core_sched_before().
2658 	 */
2659 	if (scx_rq_bypassing(rq)) {
2660 		curr->scx.slice = 0;
2661 		touch_core_sched(rq, curr);
2662 	} else if (SCX_HAS_OP(sch, tick)) {
2663 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, tick, rq, curr);
2664 	}
2665 
2666 	if (!curr->scx.slice)
2667 		resched_curr(rq);
2668 }
2669 
2670 #ifdef CONFIG_EXT_GROUP_SCHED
tg_cgrp(struct task_group * tg)2671 static struct cgroup *tg_cgrp(struct task_group *tg)
2672 {
2673 	/*
2674 	 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup,
2675 	 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the
2676 	 * root cgroup.
2677 	 */
2678 	if (tg && tg->css.cgroup)
2679 		return tg->css.cgroup;
2680 	else
2681 		return &cgrp_dfl_root.cgrp;
2682 }
2683 
2684 #define SCX_INIT_TASK_ARGS_CGROUP(tg)		.cgroup = tg_cgrp(tg),
2685 
2686 #else	/* CONFIG_EXT_GROUP_SCHED */
2687 
2688 #define SCX_INIT_TASK_ARGS_CGROUP(tg)
2689 
2690 #endif	/* CONFIG_EXT_GROUP_SCHED */
2691 
scx_get_task_state(const struct task_struct * p)2692 static enum scx_task_state scx_get_task_state(const struct task_struct *p)
2693 {
2694 	return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
2695 }
2696 
scx_set_task_state(struct task_struct * p,enum scx_task_state state)2697 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
2698 {
2699 	enum scx_task_state prev_state = scx_get_task_state(p);
2700 	bool warn = false;
2701 
2702 	BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS));
2703 
2704 	switch (state) {
2705 	case SCX_TASK_NONE:
2706 		break;
2707 	case SCX_TASK_INIT:
2708 		warn = prev_state != SCX_TASK_NONE;
2709 		break;
2710 	case SCX_TASK_READY:
2711 		warn = prev_state == SCX_TASK_NONE;
2712 		break;
2713 	case SCX_TASK_ENABLED:
2714 		warn = prev_state != SCX_TASK_READY;
2715 		break;
2716 	default:
2717 		warn = true;
2718 		return;
2719 	}
2720 
2721 	WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
2722 		  prev_state, state, p->comm, p->pid);
2723 
2724 	p->scx.flags &= ~SCX_TASK_STATE_MASK;
2725 	p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
2726 }
2727 
scx_init_task(struct task_struct * p,struct task_group * tg,bool fork)2728 static int scx_init_task(struct task_struct *p, struct task_group *tg, bool fork)
2729 {
2730 	struct scx_sched *sch = scx_root;
2731 	int ret;
2732 
2733 	p->scx.disallow = false;
2734 
2735 	if (SCX_HAS_OP(sch, init_task)) {
2736 		struct scx_init_task_args args = {
2737 			SCX_INIT_TASK_ARGS_CGROUP(tg)
2738 			.fork = fork,
2739 		};
2740 
2741 		ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init_task, NULL,
2742 				      p, &args);
2743 		if (unlikely(ret)) {
2744 			ret = ops_sanitize_err(sch, "init_task", ret);
2745 			return ret;
2746 		}
2747 	}
2748 
2749 	scx_set_task_state(p, SCX_TASK_INIT);
2750 
2751 	if (p->scx.disallow) {
2752 		if (!fork) {
2753 			struct rq *rq;
2754 			struct rq_flags rf;
2755 
2756 			rq = task_rq_lock(p, &rf);
2757 
2758 			/*
2759 			 * We're in the load path and @p->policy will be applied
2760 			 * right after. Reverting @p->policy here and rejecting
2761 			 * %SCHED_EXT transitions from scx_check_setscheduler()
2762 			 * guarantees that if ops.init_task() sets @p->disallow,
2763 			 * @p can never be in SCX.
2764 			 */
2765 			if (p->policy == SCHED_EXT) {
2766 				p->policy = SCHED_NORMAL;
2767 				atomic_long_inc(&scx_nr_rejected);
2768 			}
2769 
2770 			task_rq_unlock(rq, p, &rf);
2771 		} else if (p->policy == SCHED_EXT) {
2772 			scx_error(sch, "ops.init_task() set task->scx.disallow for %s[%d] during fork",
2773 				  p->comm, p->pid);
2774 		}
2775 	}
2776 
2777 	p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
2778 	return 0;
2779 }
2780 
scx_enable_task(struct task_struct * p)2781 static void scx_enable_task(struct task_struct *p)
2782 {
2783 	struct scx_sched *sch = scx_root;
2784 	struct rq *rq = task_rq(p);
2785 	u32 weight;
2786 
2787 	lockdep_assert_rq_held(rq);
2788 
2789 	/*
2790 	 * Set the weight before calling ops.enable() so that the scheduler
2791 	 * doesn't see a stale value if they inspect the task struct.
2792 	 */
2793 	if (task_has_idle_policy(p))
2794 		weight = WEIGHT_IDLEPRIO;
2795 	else
2796 		weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
2797 
2798 	p->scx.weight = sched_weight_to_cgroup(weight);
2799 
2800 	if (SCX_HAS_OP(sch, enable))
2801 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, enable, rq, p);
2802 	scx_set_task_state(p, SCX_TASK_ENABLED);
2803 
2804 	if (SCX_HAS_OP(sch, set_weight))
2805 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq,
2806 				 p, p->scx.weight);
2807 }
2808 
scx_disable_task(struct task_struct * p)2809 static void scx_disable_task(struct task_struct *p)
2810 {
2811 	struct scx_sched *sch = scx_root;
2812 	struct rq *rq = task_rq(p);
2813 
2814 	lockdep_assert_rq_held(rq);
2815 	WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
2816 
2817 	if (SCX_HAS_OP(sch, disable))
2818 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, disable, rq, p);
2819 	scx_set_task_state(p, SCX_TASK_READY);
2820 }
2821 
scx_exit_task(struct task_struct * p)2822 static void scx_exit_task(struct task_struct *p)
2823 {
2824 	struct scx_sched *sch = scx_root;
2825 	struct scx_exit_task_args args = {
2826 		.cancelled = false,
2827 	};
2828 
2829 	lockdep_assert_rq_held(task_rq(p));
2830 
2831 	switch (scx_get_task_state(p)) {
2832 	case SCX_TASK_NONE:
2833 		return;
2834 	case SCX_TASK_INIT:
2835 		args.cancelled = true;
2836 		break;
2837 	case SCX_TASK_READY:
2838 		break;
2839 	case SCX_TASK_ENABLED:
2840 		scx_disable_task(p);
2841 		break;
2842 	default:
2843 		WARN_ON_ONCE(true);
2844 		return;
2845 	}
2846 
2847 	if (SCX_HAS_OP(sch, exit_task))
2848 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, exit_task, task_rq(p),
2849 				 p, &args);
2850 	scx_set_task_state(p, SCX_TASK_NONE);
2851 }
2852 
init_scx_entity(struct sched_ext_entity * scx)2853 void init_scx_entity(struct sched_ext_entity *scx)
2854 {
2855 	memset(scx, 0, sizeof(*scx));
2856 	INIT_LIST_HEAD(&scx->dsq_list.node);
2857 	RB_CLEAR_NODE(&scx->dsq_priq);
2858 	scx->sticky_cpu = -1;
2859 	scx->holding_cpu = -1;
2860 	INIT_LIST_HEAD(&scx->runnable_node);
2861 	scx->runnable_at = jiffies;
2862 	scx->ddsp_dsq_id = SCX_DSQ_INVALID;
2863 	scx->slice = SCX_SLICE_DFL;
2864 }
2865 
scx_pre_fork(struct task_struct * p)2866 void scx_pre_fork(struct task_struct *p)
2867 {
2868 	/*
2869 	 * BPF scheduler enable/disable paths want to be able to iterate and
2870 	 * update all tasks which can become complex when racing forks. As
2871 	 * enable/disable are very cold paths, let's use a percpu_rwsem to
2872 	 * exclude forks.
2873 	 */
2874 	percpu_down_read(&scx_fork_rwsem);
2875 }
2876 
scx_fork(struct task_struct * p)2877 int scx_fork(struct task_struct *p)
2878 {
2879 	percpu_rwsem_assert_held(&scx_fork_rwsem);
2880 
2881 	if (scx_init_task_enabled)
2882 		return scx_init_task(p, task_group(p), true);
2883 	else
2884 		return 0;
2885 }
2886 
scx_post_fork(struct task_struct * p)2887 void scx_post_fork(struct task_struct *p)
2888 {
2889 	if (scx_init_task_enabled) {
2890 		scx_set_task_state(p, SCX_TASK_READY);
2891 
2892 		/*
2893 		 * Enable the task immediately if it's running on sched_ext.
2894 		 * Otherwise, it'll be enabled in switching_to_scx() if and
2895 		 * when it's ever configured to run with a SCHED_EXT policy.
2896 		 */
2897 		if (p->sched_class == &ext_sched_class) {
2898 			struct rq_flags rf;
2899 			struct rq *rq;
2900 
2901 			rq = task_rq_lock(p, &rf);
2902 			scx_enable_task(p);
2903 			task_rq_unlock(rq, p, &rf);
2904 		}
2905 	}
2906 
2907 	spin_lock_irq(&scx_tasks_lock);
2908 	list_add_tail(&p->scx.tasks_node, &scx_tasks);
2909 	spin_unlock_irq(&scx_tasks_lock);
2910 
2911 	percpu_up_read(&scx_fork_rwsem);
2912 }
2913 
scx_cancel_fork(struct task_struct * p)2914 void scx_cancel_fork(struct task_struct *p)
2915 {
2916 	if (scx_enabled()) {
2917 		struct rq *rq;
2918 		struct rq_flags rf;
2919 
2920 		rq = task_rq_lock(p, &rf);
2921 		WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
2922 		scx_exit_task(p);
2923 		task_rq_unlock(rq, p, &rf);
2924 	}
2925 
2926 	percpu_up_read(&scx_fork_rwsem);
2927 }
2928 
sched_ext_free(struct task_struct * p)2929 void sched_ext_free(struct task_struct *p)
2930 {
2931 	unsigned long flags;
2932 
2933 	spin_lock_irqsave(&scx_tasks_lock, flags);
2934 	list_del_init(&p->scx.tasks_node);
2935 	spin_unlock_irqrestore(&scx_tasks_lock, flags);
2936 
2937 	/*
2938 	 * @p is off scx_tasks and wholly ours. scx_enable()'s READY -> ENABLED
2939 	 * transitions can't race us. Disable ops for @p.
2940 	 */
2941 	if (scx_get_task_state(p) != SCX_TASK_NONE) {
2942 		struct rq_flags rf;
2943 		struct rq *rq;
2944 
2945 		rq = task_rq_lock(p, &rf);
2946 		scx_exit_task(p);
2947 		task_rq_unlock(rq, p, &rf);
2948 	}
2949 }
2950 
reweight_task_scx(struct rq * rq,struct task_struct * p,const struct load_weight * lw)2951 static void reweight_task_scx(struct rq *rq, struct task_struct *p,
2952 			      const struct load_weight *lw)
2953 {
2954 	struct scx_sched *sch = scx_root;
2955 
2956 	lockdep_assert_rq_held(task_rq(p));
2957 
2958 	p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
2959 	if (SCX_HAS_OP(sch, set_weight))
2960 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq,
2961 				 p, p->scx.weight);
2962 }
2963 
prio_changed_scx(struct rq * rq,struct task_struct * p,int oldprio)2964 static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio)
2965 {
2966 }
2967 
switching_to_scx(struct rq * rq,struct task_struct * p)2968 static void switching_to_scx(struct rq *rq, struct task_struct *p)
2969 {
2970 	struct scx_sched *sch = scx_root;
2971 
2972 	scx_enable_task(p);
2973 
2974 	/*
2975 	 * set_cpus_allowed_scx() is not called while @p is associated with a
2976 	 * different scheduler class. Keep the BPF scheduler up-to-date.
2977 	 */
2978 	if (SCX_HAS_OP(sch, set_cpumask))
2979 		SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, rq,
2980 				 p, (struct cpumask *)p->cpus_ptr);
2981 }
2982 
switched_from_scx(struct rq * rq,struct task_struct * p)2983 static void switched_from_scx(struct rq *rq, struct task_struct *p)
2984 {
2985 	scx_disable_task(p);
2986 }
2987 
wakeup_preempt_scx(struct rq * rq,struct task_struct * p,int wake_flags)2988 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p,int wake_flags) {}
switched_to_scx(struct rq * rq,struct task_struct * p)2989 static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
2990 
scx_check_setscheduler(struct task_struct * p,int policy)2991 int scx_check_setscheduler(struct task_struct *p, int policy)
2992 {
2993 	lockdep_assert_rq_held(task_rq(p));
2994 
2995 	/* if disallow, reject transitioning into SCX */
2996 	if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
2997 	    p->policy != policy && policy == SCHED_EXT)
2998 		return -EACCES;
2999 
3000 	return 0;
3001 }
3002 
3003 #ifdef CONFIG_NO_HZ_FULL
scx_can_stop_tick(struct rq * rq)3004 bool scx_can_stop_tick(struct rq *rq)
3005 {
3006 	struct task_struct *p = rq->curr;
3007 
3008 	if (scx_rq_bypassing(rq))
3009 		return false;
3010 
3011 	if (p->sched_class != &ext_sched_class)
3012 		return true;
3013 
3014 	/*
3015 	 * @rq can dispatch from different DSQs, so we can't tell whether it
3016 	 * needs the tick or not by looking at nr_running. Allow stopping ticks
3017 	 * iff the BPF scheduler indicated so. See set_next_task_scx().
3018 	 */
3019 	return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
3020 }
3021 #endif
3022 
3023 #ifdef CONFIG_EXT_GROUP_SCHED
3024 
3025 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_ops_rwsem);
3026 static bool scx_cgroup_enabled;
3027 
scx_tg_init(struct task_group * tg)3028 void scx_tg_init(struct task_group *tg)
3029 {
3030 	tg->scx.weight = CGROUP_WEIGHT_DFL;
3031 	tg->scx.bw_period_us = default_bw_period_us();
3032 	tg->scx.bw_quota_us = RUNTIME_INF;
3033 }
3034 
scx_tg_online(struct task_group * tg)3035 int scx_tg_online(struct task_group *tg)
3036 {
3037 	struct scx_sched *sch = scx_root;
3038 	int ret = 0;
3039 
3040 	WARN_ON_ONCE(tg->scx.flags & (SCX_TG_ONLINE | SCX_TG_INITED));
3041 
3042 	if (scx_cgroup_enabled) {
3043 		if (SCX_HAS_OP(sch, cgroup_init)) {
3044 			struct scx_cgroup_init_args args =
3045 				{ .weight = tg->scx.weight,
3046 				  .bw_period_us = tg->scx.bw_period_us,
3047 				  .bw_quota_us = tg->scx.bw_quota_us,
3048 				  .bw_burst_us = tg->scx.bw_burst_us };
3049 
3050 			ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init,
3051 					      NULL, tg->css.cgroup, &args);
3052 			if (ret)
3053 				ret = ops_sanitize_err(sch, "cgroup_init", ret);
3054 		}
3055 		if (ret == 0)
3056 			tg->scx.flags |= SCX_TG_ONLINE | SCX_TG_INITED;
3057 	} else {
3058 		tg->scx.flags |= SCX_TG_ONLINE;
3059 	}
3060 
3061 	return ret;
3062 }
3063 
scx_tg_offline(struct task_group * tg)3064 void scx_tg_offline(struct task_group *tg)
3065 {
3066 	struct scx_sched *sch = scx_root;
3067 
3068 	WARN_ON_ONCE(!(tg->scx.flags & SCX_TG_ONLINE));
3069 
3070 	if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_exit) &&
3071 	    (tg->scx.flags & SCX_TG_INITED))
3072 		SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL,
3073 			    tg->css.cgroup);
3074 	tg->scx.flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
3075 }
3076 
scx_cgroup_can_attach(struct cgroup_taskset * tset)3077 int scx_cgroup_can_attach(struct cgroup_taskset *tset)
3078 {
3079 	struct scx_sched *sch = scx_root;
3080 	struct cgroup_subsys_state *css;
3081 	struct task_struct *p;
3082 	int ret;
3083 
3084 	if (!scx_cgroup_enabled)
3085 		return 0;
3086 
3087 	cgroup_taskset_for_each(p, css, tset) {
3088 		struct cgroup *from = tg_cgrp(task_group(p));
3089 		struct cgroup *to = tg_cgrp(css_tg(css));
3090 
3091 		WARN_ON_ONCE(p->scx.cgrp_moving_from);
3092 
3093 		/*
3094 		 * sched_move_task() omits identity migrations. Let's match the
3095 		 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move()
3096 		 * always match one-to-one.
3097 		 */
3098 		if (from == to)
3099 			continue;
3100 
3101 		if (SCX_HAS_OP(sch, cgroup_prep_move)) {
3102 			ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED,
3103 					      cgroup_prep_move, NULL,
3104 					      p, from, css->cgroup);
3105 			if (ret)
3106 				goto err;
3107 		}
3108 
3109 		p->scx.cgrp_moving_from = from;
3110 	}
3111 
3112 	return 0;
3113 
3114 err:
3115 	cgroup_taskset_for_each(p, css, tset) {
3116 		if (SCX_HAS_OP(sch, cgroup_cancel_move) &&
3117 		    p->scx.cgrp_moving_from)
3118 			SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL,
3119 				    p, p->scx.cgrp_moving_from, css->cgroup);
3120 		p->scx.cgrp_moving_from = NULL;
3121 	}
3122 
3123 	return ops_sanitize_err(sch, "cgroup_prep_move", ret);
3124 }
3125 
scx_cgroup_move_task(struct task_struct * p)3126 void scx_cgroup_move_task(struct task_struct *p)
3127 {
3128 	struct scx_sched *sch = scx_root;
3129 
3130 	if (!scx_cgroup_enabled)
3131 		return;
3132 
3133 	/*
3134 	 * @p must have ops.cgroup_prep_move() called on it and thus
3135 	 * cgrp_moving_from set.
3136 	 */
3137 	if (SCX_HAS_OP(sch, cgroup_move) &&
3138 	    !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
3139 		SCX_CALL_OP_TASK(sch, SCX_KF_UNLOCKED, cgroup_move, NULL,
3140 				 p, p->scx.cgrp_moving_from,
3141 				 tg_cgrp(task_group(p)));
3142 	p->scx.cgrp_moving_from = NULL;
3143 }
3144 
scx_cgroup_cancel_attach(struct cgroup_taskset * tset)3145 void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
3146 {
3147 	struct scx_sched *sch = scx_root;
3148 	struct cgroup_subsys_state *css;
3149 	struct task_struct *p;
3150 
3151 	if (!scx_cgroup_enabled)
3152 		return;
3153 
3154 	cgroup_taskset_for_each(p, css, tset) {
3155 		if (SCX_HAS_OP(sch, cgroup_cancel_move) &&
3156 		    p->scx.cgrp_moving_from)
3157 			SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL,
3158 				    p, p->scx.cgrp_moving_from, css->cgroup);
3159 		p->scx.cgrp_moving_from = NULL;
3160 	}
3161 }
3162 
scx_group_set_weight(struct task_group * tg,unsigned long weight)3163 void scx_group_set_weight(struct task_group *tg, unsigned long weight)
3164 {
3165 	struct scx_sched *sch = scx_root;
3166 
3167 	percpu_down_read(&scx_cgroup_ops_rwsem);
3168 
3169 	if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_weight) &&
3170 	    tg->scx.weight != weight)
3171 		SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_weight, NULL,
3172 			    tg_cgrp(tg), weight);
3173 
3174 	tg->scx.weight = weight;
3175 
3176 	percpu_up_read(&scx_cgroup_ops_rwsem);
3177 }
3178 
scx_group_set_idle(struct task_group * tg,bool idle)3179 void scx_group_set_idle(struct task_group *tg, bool idle)
3180 {
3181 	/* TODO: Implement ops->cgroup_set_idle() */
3182 }
3183 
scx_group_set_bandwidth(struct task_group * tg,u64 period_us,u64 quota_us,u64 burst_us)3184 void scx_group_set_bandwidth(struct task_group *tg,
3185 			     u64 period_us, u64 quota_us, u64 burst_us)
3186 {
3187 	struct scx_sched *sch = scx_root;
3188 
3189 	percpu_down_read(&scx_cgroup_ops_rwsem);
3190 
3191 	if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_bandwidth) &&
3192 	    (tg->scx.bw_period_us != period_us ||
3193 	     tg->scx.bw_quota_us != quota_us ||
3194 	     tg->scx.bw_burst_us != burst_us))
3195 		SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_bandwidth, NULL,
3196 			    tg_cgrp(tg), period_us, quota_us, burst_us);
3197 
3198 	tg->scx.bw_period_us = period_us;
3199 	tg->scx.bw_quota_us = quota_us;
3200 	tg->scx.bw_burst_us = burst_us;
3201 
3202 	percpu_up_read(&scx_cgroup_ops_rwsem);
3203 }
3204 
scx_cgroup_lock(void)3205 static void scx_cgroup_lock(void)
3206 {
3207 	percpu_down_write(&scx_cgroup_ops_rwsem);
3208 	cgroup_lock();
3209 }
3210 
scx_cgroup_unlock(void)3211 static void scx_cgroup_unlock(void)
3212 {
3213 	cgroup_unlock();
3214 	percpu_up_write(&scx_cgroup_ops_rwsem);
3215 }
3216 
3217 #else	/* CONFIG_EXT_GROUP_SCHED */
3218 
scx_cgroup_lock(void)3219 static void scx_cgroup_lock(void) {}
scx_cgroup_unlock(void)3220 static void scx_cgroup_unlock(void) {}
3221 
3222 #endif	/* CONFIG_EXT_GROUP_SCHED */
3223 
3224 /*
3225  * Omitted operations:
3226  *
3227  * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task
3228  *   isn't tied to the CPU at that point. Preemption is implemented by resetting
3229  *   the victim task's slice to 0 and triggering reschedule on the target CPU.
3230  *
3231  * - migrate_task_rq: Unnecessary as task to cpu mapping is transient.
3232  *
3233  * - task_fork/dead: We need fork/dead notifications for all tasks regardless of
3234  *   their current sched_class. Call them directly from sched core instead.
3235  */
3236 DEFINE_SCHED_CLASS(ext) = {
3237 	.enqueue_task		= enqueue_task_scx,
3238 	.dequeue_task		= dequeue_task_scx,
3239 	.yield_task		= yield_task_scx,
3240 	.yield_to_task		= yield_to_task_scx,
3241 
3242 	.wakeup_preempt		= wakeup_preempt_scx,
3243 
3244 	.balance		= balance_scx,
3245 	.pick_task		= pick_task_scx,
3246 
3247 	.put_prev_task		= put_prev_task_scx,
3248 	.set_next_task		= set_next_task_scx,
3249 
3250 	.select_task_rq		= select_task_rq_scx,
3251 	.task_woken		= task_woken_scx,
3252 	.set_cpus_allowed	= set_cpus_allowed_scx,
3253 
3254 	.rq_online		= rq_online_scx,
3255 	.rq_offline		= rq_offline_scx,
3256 
3257 	.task_tick		= task_tick_scx,
3258 
3259 	.switching_to		= switching_to_scx,
3260 	.switched_from		= switched_from_scx,
3261 	.switched_to		= switched_to_scx,
3262 	.reweight_task		= reweight_task_scx,
3263 	.prio_changed		= prio_changed_scx,
3264 
3265 	.update_curr		= update_curr_scx,
3266 
3267 #ifdef CONFIG_UCLAMP_TASK
3268 	.uclamp_enabled		= 1,
3269 #endif
3270 };
3271 
init_dsq(struct scx_dispatch_q * dsq,u64 dsq_id)3272 static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id)
3273 {
3274 	memset(dsq, 0, sizeof(*dsq));
3275 
3276 	raw_spin_lock_init(&dsq->lock);
3277 	INIT_LIST_HEAD(&dsq->list);
3278 	dsq->id = dsq_id;
3279 }
3280 
free_dsq_irq_workfn(struct irq_work * irq_work)3281 static void free_dsq_irq_workfn(struct irq_work *irq_work)
3282 {
3283 	struct llist_node *to_free = llist_del_all(&dsqs_to_free);
3284 	struct scx_dispatch_q *dsq, *tmp_dsq;
3285 
3286 	llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
3287 		kfree_rcu(dsq, rcu);
3288 }
3289 
3290 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);
3291 
destroy_dsq(struct scx_sched * sch,u64 dsq_id)3292 static void destroy_dsq(struct scx_sched *sch, u64 dsq_id)
3293 {
3294 	struct scx_dispatch_q *dsq;
3295 	unsigned long flags;
3296 
3297 	rcu_read_lock();
3298 
3299 	dsq = find_user_dsq(sch, dsq_id);
3300 	if (!dsq)
3301 		goto out_unlock_rcu;
3302 
3303 	raw_spin_lock_irqsave(&dsq->lock, flags);
3304 
3305 	if (dsq->nr) {
3306 		scx_error(sch, "attempting to destroy in-use dsq 0x%016llx (nr=%u)",
3307 			  dsq->id, dsq->nr);
3308 		goto out_unlock_dsq;
3309 	}
3310 
3311 	if (rhashtable_remove_fast(&sch->dsq_hash, &dsq->hash_node,
3312 				   dsq_hash_params))
3313 		goto out_unlock_dsq;
3314 
3315 	/*
3316 	 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from
3317 	 * queueing more tasks. As this function can be called from anywhere,
3318 	 * freeing is bounced through an irq work to avoid nesting RCU
3319 	 * operations inside scheduler locks.
3320 	 */
3321 	dsq->id = SCX_DSQ_INVALID;
3322 	llist_add(&dsq->free_node, &dsqs_to_free);
3323 	irq_work_queue(&free_dsq_irq_work);
3324 
3325 out_unlock_dsq:
3326 	raw_spin_unlock_irqrestore(&dsq->lock, flags);
3327 out_unlock_rcu:
3328 	rcu_read_unlock();
3329 }
3330 
3331 #ifdef CONFIG_EXT_GROUP_SCHED
scx_cgroup_exit(struct scx_sched * sch)3332 static void scx_cgroup_exit(struct scx_sched *sch)
3333 {
3334 	struct cgroup_subsys_state *css;
3335 
3336 	scx_cgroup_enabled = false;
3337 
3338 	/*
3339 	 * scx_tg_on/offline() are excluded through cgroup_lock(). If we walk
3340 	 * cgroups and exit all the inited ones, all online cgroups are exited.
3341 	 */
3342 	css_for_each_descendant_post(css, &root_task_group.css) {
3343 		struct task_group *tg = css_tg(css);
3344 
3345 		if (!(tg->scx.flags & SCX_TG_INITED))
3346 			continue;
3347 		tg->scx.flags &= ~SCX_TG_INITED;
3348 
3349 		if (!sch->ops.cgroup_exit)
3350 			continue;
3351 
3352 		SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL,
3353 			    css->cgroup);
3354 	}
3355 }
3356 
scx_cgroup_init(struct scx_sched * sch)3357 static int scx_cgroup_init(struct scx_sched *sch)
3358 {
3359 	struct cgroup_subsys_state *css;
3360 	int ret;
3361 
3362 	/*
3363 	 * scx_tg_on/offline() are excluded through cgroup_lock(). If we walk
3364 	 * cgroups and init, all online cgroups are initialized.
3365 	 */
3366 	css_for_each_descendant_pre(css, &root_task_group.css) {
3367 		struct task_group *tg = css_tg(css);
3368 		struct scx_cgroup_init_args args = {
3369 			.weight = tg->scx.weight,
3370 			.bw_period_us = tg->scx.bw_period_us,
3371 			.bw_quota_us = tg->scx.bw_quota_us,
3372 			.bw_burst_us = tg->scx.bw_burst_us,
3373 		};
3374 
3375 		if ((tg->scx.flags &
3376 		     (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
3377 			continue;
3378 
3379 		if (!sch->ops.cgroup_init) {
3380 			tg->scx.flags |= SCX_TG_INITED;
3381 			continue;
3382 		}
3383 
3384 		ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init, NULL,
3385 				      css->cgroup, &args);
3386 		if (ret) {
3387 			css_put(css);
3388 			scx_error(sch, "ops.cgroup_init() failed (%d)", ret);
3389 			return ret;
3390 		}
3391 		tg->scx.flags |= SCX_TG_INITED;
3392 	}
3393 
3394 	WARN_ON_ONCE(scx_cgroup_enabled);
3395 	scx_cgroup_enabled = true;
3396 
3397 	return 0;
3398 }
3399 
3400 #else
scx_cgroup_exit(struct scx_sched * sch)3401 static void scx_cgroup_exit(struct scx_sched *sch) {}
scx_cgroup_init(struct scx_sched * sch)3402 static int scx_cgroup_init(struct scx_sched *sch) { return 0; }
3403 #endif
3404 
3405 
3406 /********************************************************************************
3407  * Sysfs interface and ops enable/disable.
3408  */
3409 
3410 #define SCX_ATTR(_name)								\
3411 	static struct kobj_attribute scx_attr_##_name = {			\
3412 		.attr = { .name = __stringify(_name), .mode = 0444 },		\
3413 		.show = scx_attr_##_name##_show,				\
3414 	}
3415 
scx_attr_state_show(struct kobject * kobj,struct kobj_attribute * ka,char * buf)3416 static ssize_t scx_attr_state_show(struct kobject *kobj,
3417 				   struct kobj_attribute *ka, char *buf)
3418 {
3419 	return sysfs_emit(buf, "%s\n", scx_enable_state_str[scx_enable_state()]);
3420 }
3421 SCX_ATTR(state);
3422 
scx_attr_switch_all_show(struct kobject * kobj,struct kobj_attribute * ka,char * buf)3423 static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
3424 					struct kobj_attribute *ka, char *buf)
3425 {
3426 	return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all));
3427 }
3428 SCX_ATTR(switch_all);
3429 
scx_attr_nr_rejected_show(struct kobject * kobj,struct kobj_attribute * ka,char * buf)3430 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
3431 					 struct kobj_attribute *ka, char *buf)
3432 {
3433 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
3434 }
3435 SCX_ATTR(nr_rejected);
3436 
scx_attr_hotplug_seq_show(struct kobject * kobj,struct kobj_attribute * ka,char * buf)3437 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
3438 					 struct kobj_attribute *ka, char *buf)
3439 {
3440 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
3441 }
3442 SCX_ATTR(hotplug_seq);
3443 
scx_attr_enable_seq_show(struct kobject * kobj,struct kobj_attribute * ka,char * buf)3444 static ssize_t scx_attr_enable_seq_show(struct kobject *kobj,
3445 					struct kobj_attribute *ka, char *buf)
3446 {
3447 	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq));
3448 }
3449 SCX_ATTR(enable_seq);
3450 
3451 static struct attribute *scx_global_attrs[] = {
3452 	&scx_attr_state.attr,
3453 	&scx_attr_switch_all.attr,
3454 	&scx_attr_nr_rejected.attr,
3455 	&scx_attr_hotplug_seq.attr,
3456 	&scx_attr_enable_seq.attr,
3457 	NULL,
3458 };
3459 
3460 static const struct attribute_group scx_global_attr_group = {
3461 	.attrs = scx_global_attrs,
3462 };
3463 
3464 static void free_exit_info(struct scx_exit_info *ei);
3465 
scx_sched_free_rcu_work(struct work_struct * work)3466 static void scx_sched_free_rcu_work(struct work_struct *work)
3467 {
3468 	struct rcu_work *rcu_work = to_rcu_work(work);
3469 	struct scx_sched *sch = container_of(rcu_work, struct scx_sched, rcu_work);
3470 	struct rhashtable_iter rht_iter;
3471 	struct scx_dispatch_q *dsq;
3472 	int node;
3473 
3474 	kthread_stop(sch->helper->task);
3475 	free_percpu(sch->pcpu);
3476 
3477 	for_each_node_state(node, N_POSSIBLE)
3478 		kfree(sch->global_dsqs[node]);
3479 	kfree(sch->global_dsqs);
3480 
3481 	rhashtable_walk_enter(&sch->dsq_hash, &rht_iter);
3482 	do {
3483 		rhashtable_walk_start(&rht_iter);
3484 
3485 		while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq))
3486 			destroy_dsq(sch, dsq->id);
3487 
3488 		rhashtable_walk_stop(&rht_iter);
3489 	} while (dsq == ERR_PTR(-EAGAIN));
3490 	rhashtable_walk_exit(&rht_iter);
3491 
3492 	rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL);
3493 	free_exit_info(sch->exit_info);
3494 	kfree(sch);
3495 }
3496 
scx_kobj_release(struct kobject * kobj)3497 static void scx_kobj_release(struct kobject *kobj)
3498 {
3499 	struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
3500 
3501 	INIT_RCU_WORK(&sch->rcu_work, scx_sched_free_rcu_work);
3502 	queue_rcu_work(system_unbound_wq, &sch->rcu_work);
3503 }
3504 
scx_attr_ops_show(struct kobject * kobj,struct kobj_attribute * ka,char * buf)3505 static ssize_t scx_attr_ops_show(struct kobject *kobj,
3506 				 struct kobj_attribute *ka, char *buf)
3507 {
3508 	return sysfs_emit(buf, "%s\n", scx_root->ops.name);
3509 }
3510 SCX_ATTR(ops);
3511 
3512 #define scx_attr_event_show(buf, at, events, kind) ({				\
3513 	sysfs_emit_at(buf, at, "%s %llu\n", #kind, (events)->kind);		\
3514 })
3515 
scx_attr_events_show(struct kobject * kobj,struct kobj_attribute * ka,char * buf)3516 static ssize_t scx_attr_events_show(struct kobject *kobj,
3517 				    struct kobj_attribute *ka, char *buf)
3518 {
3519 	struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
3520 	struct scx_event_stats events;
3521 	int at = 0;
3522 
3523 	scx_read_events(sch, &events);
3524 	at += scx_attr_event_show(buf, at, &events, SCX_EV_SELECT_CPU_FALLBACK);
3525 	at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
3526 	at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_KEEP_LAST);
3527 	at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_EXITING);
3528 	at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
3529 	at += scx_attr_event_show(buf, at, &events, SCX_EV_REFILL_SLICE_DFL);
3530 	at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DURATION);
3531 	at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DISPATCH);
3532 	at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_ACTIVATE);
3533 	return at;
3534 }
3535 SCX_ATTR(events);
3536 
3537 static struct attribute *scx_sched_attrs[] = {
3538 	&scx_attr_ops.attr,
3539 	&scx_attr_events.attr,
3540 	NULL,
3541 };
3542 ATTRIBUTE_GROUPS(scx_sched);
3543 
3544 static const struct kobj_type scx_ktype = {
3545 	.release = scx_kobj_release,
3546 	.sysfs_ops = &kobj_sysfs_ops,
3547 	.default_groups = scx_sched_groups,
3548 };
3549 
scx_uevent(const struct kobject * kobj,struct kobj_uevent_env * env)3550 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
3551 {
3552 	return add_uevent_var(env, "SCXOPS=%s", scx_root->ops.name);
3553 }
3554 
3555 static const struct kset_uevent_ops scx_uevent_ops = {
3556 	.uevent = scx_uevent,
3557 };
3558 
3559 /*
3560  * Used by sched_fork() and __setscheduler_prio() to pick the matching
3561  * sched_class. dl/rt are already handled.
3562  */
task_should_scx(int policy)3563 bool task_should_scx(int policy)
3564 {
3565 	if (!scx_enabled() || unlikely(scx_enable_state() == SCX_DISABLING))
3566 		return false;
3567 	if (READ_ONCE(scx_switching_all))
3568 		return true;
3569 	return policy == SCHED_EXT;
3570 }
3571 
scx_allow_ttwu_queue(const struct task_struct * p)3572 bool scx_allow_ttwu_queue(const struct task_struct *p)
3573 {
3574 	struct scx_sched *sch;
3575 
3576 	if (!scx_enabled())
3577 		return true;
3578 
3579 	sch = rcu_dereference_sched(scx_root);
3580 	if (unlikely(!sch))
3581 		return true;
3582 
3583 	if (sch->ops.flags & SCX_OPS_ALLOW_QUEUED_WAKEUP)
3584 		return true;
3585 
3586 	if (unlikely(p->sched_class != &ext_sched_class))
3587 		return true;
3588 
3589 	return false;
3590 }
3591 
3592 /**
3593  * scx_rcu_cpu_stall - sched_ext RCU CPU stall handler
3594  *
3595  * While there are various reasons why RCU CPU stalls can occur on a system
3596  * that may not be caused by the current BPF scheduler, try kicking out the
3597  * current scheduler in an attempt to recover the system to a good state before
3598  * issuing panics.
3599  */
scx_rcu_cpu_stall(void)3600 bool scx_rcu_cpu_stall(void)
3601 {
3602 	struct scx_sched *sch;
3603 
3604 	rcu_read_lock();
3605 
3606 	sch = rcu_dereference(scx_root);
3607 	if (unlikely(!sch)) {
3608 		rcu_read_unlock();
3609 		return false;
3610 	}
3611 
3612 	switch (scx_enable_state()) {
3613 	case SCX_ENABLING:
3614 	case SCX_ENABLED:
3615 		break;
3616 	default:
3617 		rcu_read_unlock();
3618 		return false;
3619 	}
3620 
3621 	scx_error(sch, "RCU CPU stall detected!");
3622 	rcu_read_unlock();
3623 
3624 	return true;
3625 }
3626 
3627 /**
3628  * scx_softlockup - sched_ext softlockup handler
3629  * @dur_s: number of seconds of CPU stuck due to soft lockup
3630  *
3631  * On some multi-socket setups (e.g. 2x Intel 8480c), the BPF scheduler can
3632  * live-lock the system by making many CPUs target the same DSQ to the point
3633  * where soft-lockup detection triggers. This function is called from
3634  * soft-lockup watchdog when the triggering point is close and tries to unjam
3635  * the system by enabling the breather and aborting the BPF scheduler.
3636  */
scx_softlockup(u32 dur_s)3637 void scx_softlockup(u32 dur_s)
3638 {
3639 	struct scx_sched *sch;
3640 
3641 	rcu_read_lock();
3642 
3643 	sch = rcu_dereference(scx_root);
3644 	if (unlikely(!sch))
3645 		goto out_unlock;
3646 
3647 	switch (scx_enable_state()) {
3648 	case SCX_ENABLING:
3649 	case SCX_ENABLED:
3650 		break;
3651 	default:
3652 		goto out_unlock;
3653 	}
3654 
3655 	/* allow only one instance, cleared at the end of scx_bypass() */
3656 	if (test_and_set_bit(0, &scx_in_softlockup))
3657 		goto out_unlock;
3658 
3659 	printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU%d stuck for %us, disabling \"%s\"\n",
3660 			smp_processor_id(), dur_s, scx_root->ops.name);
3661 
3662 	/*
3663 	 * Some CPUs may be trapped in the dispatch paths. Enable breather
3664 	 * immediately; otherwise, we might even be able to get to scx_bypass().
3665 	 */
3666 	atomic_inc(&scx_breather_depth);
3667 
3668 	scx_error(sch, "soft lockup - CPU#%d stuck for %us", smp_processor_id(), dur_s);
3669 out_unlock:
3670 	rcu_read_unlock();
3671 }
3672 
scx_clear_softlockup(void)3673 static void scx_clear_softlockup(void)
3674 {
3675 	if (test_and_clear_bit(0, &scx_in_softlockup))
3676 		atomic_dec(&scx_breather_depth);
3677 }
3678 
3679 /**
3680  * scx_bypass - [Un]bypass scx_ops and guarantee forward progress
3681  * @bypass: true for bypass, false for unbypass
3682  *
3683  * Bypassing guarantees that all runnable tasks make forward progress without
3684  * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
3685  * be held by tasks that the BPF scheduler is forgetting to run, which
3686  * unfortunately also excludes toggling the static branches.
3687  *
3688  * Let's work around by overriding a couple ops and modifying behaviors based on
3689  * the DISABLING state and then cycling the queued tasks through dequeue/enqueue
3690  * to force global FIFO scheduling.
3691  *
3692  * - ops.select_cpu() is ignored and the default select_cpu() is used.
3693  *
3694  * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
3695  *   %SCX_OPS_ENQ_LAST is also ignored.
3696  *
3697  * - ops.dispatch() is ignored.
3698  *
3699  * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
3700  *   can't be trusted. Whenever a tick triggers, the running task is rotated to
3701  *   the tail of the queue with core_sched_at touched.
3702  *
3703  * - pick_next_task() suppresses zero slice warning.
3704  *
3705  * - scx_kick_cpu() is disabled to avoid irq_work malfunction during PM
3706  *   operations.
3707  *
3708  * - scx_prio_less() reverts to the default core_sched_at order.
3709  */
scx_bypass(bool bypass)3710 static void scx_bypass(bool bypass)
3711 {
3712 	static DEFINE_RAW_SPINLOCK(bypass_lock);
3713 	static unsigned long bypass_timestamp;
3714 	struct scx_sched *sch;
3715 	unsigned long flags;
3716 	int cpu;
3717 
3718 	raw_spin_lock_irqsave(&bypass_lock, flags);
3719 	sch = rcu_dereference_bh(scx_root);
3720 
3721 	if (bypass) {
3722 		scx_bypass_depth++;
3723 		WARN_ON_ONCE(scx_bypass_depth <= 0);
3724 		if (scx_bypass_depth != 1)
3725 			goto unlock;
3726 		bypass_timestamp = ktime_get_ns();
3727 		if (sch)
3728 			scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1);
3729 	} else {
3730 		scx_bypass_depth--;
3731 		WARN_ON_ONCE(scx_bypass_depth < 0);
3732 		if (scx_bypass_depth != 0)
3733 			goto unlock;
3734 		if (sch)
3735 			scx_add_event(sch, SCX_EV_BYPASS_DURATION,
3736 				      ktime_get_ns() - bypass_timestamp);
3737 	}
3738 
3739 	atomic_inc(&scx_breather_depth);
3740 
3741 	/*
3742 	 * No task property is changing. We just need to make sure all currently
3743 	 * queued tasks are re-queued according to the new scx_rq_bypassing()
3744 	 * state. As an optimization, walk each rq's runnable_list instead of
3745 	 * the scx_tasks list.
3746 	 *
3747 	 * This function can't trust the scheduler and thus can't use
3748 	 * cpus_read_lock(). Walk all possible CPUs instead of online.
3749 	 */
3750 	for_each_possible_cpu(cpu) {
3751 		struct rq *rq = cpu_rq(cpu);
3752 		struct task_struct *p, *n;
3753 
3754 		raw_spin_rq_lock(rq);
3755 
3756 		if (bypass) {
3757 			WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
3758 			rq->scx.flags |= SCX_RQ_BYPASSING;
3759 		} else {
3760 			WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING));
3761 			rq->scx.flags &= ~SCX_RQ_BYPASSING;
3762 		}
3763 
3764 		/*
3765 		 * We need to guarantee that no tasks are on the BPF scheduler
3766 		 * while bypassing. Either we see enabled or the enable path
3767 		 * sees scx_rq_bypassing() before moving tasks to SCX.
3768 		 */
3769 		if (!scx_enabled()) {
3770 			raw_spin_rq_unlock(rq);
3771 			continue;
3772 		}
3773 
3774 		/*
3775 		 * The use of list_for_each_entry_safe_reverse() is required
3776 		 * because each task is going to be removed from and added back
3777 		 * to the runnable_list during iteration. Because they're added
3778 		 * to the tail of the list, safe reverse iteration can still
3779 		 * visit all nodes.
3780 		 */
3781 		list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
3782 						 scx.runnable_node) {
3783 			struct sched_enq_and_set_ctx ctx;
3784 
3785 			/* cycling deq/enq is enough, see the function comment */
3786 			sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
3787 			sched_enq_and_set_task(&ctx);
3788 		}
3789 
3790 		/* resched to restore ticks and idle state */
3791 		if (cpu_online(cpu) || cpu == smp_processor_id())
3792 			resched_curr(rq);
3793 
3794 		raw_spin_rq_unlock(rq);
3795 	}
3796 
3797 	atomic_dec(&scx_breather_depth);
3798 unlock:
3799 	raw_spin_unlock_irqrestore(&bypass_lock, flags);
3800 	scx_clear_softlockup();
3801 }
3802 
free_exit_info(struct scx_exit_info * ei)3803 static void free_exit_info(struct scx_exit_info *ei)
3804 {
3805 	kvfree(ei->dump);
3806 	kfree(ei->msg);
3807 	kfree(ei->bt);
3808 	kfree(ei);
3809 }
3810 
alloc_exit_info(size_t exit_dump_len)3811 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
3812 {
3813 	struct scx_exit_info *ei;
3814 
3815 	ei = kzalloc(sizeof(*ei), GFP_KERNEL);
3816 	if (!ei)
3817 		return NULL;
3818 
3819 	ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
3820 	ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
3821 	ei->dump = kvzalloc(exit_dump_len, GFP_KERNEL);
3822 
3823 	if (!ei->bt || !ei->msg || !ei->dump) {
3824 		free_exit_info(ei);
3825 		return NULL;
3826 	}
3827 
3828 	return ei;
3829 }
3830 
scx_exit_reason(enum scx_exit_kind kind)3831 static const char *scx_exit_reason(enum scx_exit_kind kind)
3832 {
3833 	switch (kind) {
3834 	case SCX_EXIT_UNREG:
3835 		return "unregistered from user space";
3836 	case SCX_EXIT_UNREG_BPF:
3837 		return "unregistered from BPF";
3838 	case SCX_EXIT_UNREG_KERN:
3839 		return "unregistered from the main kernel";
3840 	case SCX_EXIT_SYSRQ:
3841 		return "disabled by sysrq-S";
3842 	case SCX_EXIT_ERROR:
3843 		return "runtime error";
3844 	case SCX_EXIT_ERROR_BPF:
3845 		return "scx_bpf_error";
3846 	case SCX_EXIT_ERROR_STALL:
3847 		return "runnable task stall";
3848 	default:
3849 		return "<UNKNOWN>";
3850 	}
3851 }
3852 
scx_disable_workfn(struct kthread_work * work)3853 static void scx_disable_workfn(struct kthread_work *work)
3854 {
3855 	struct scx_sched *sch = container_of(work, struct scx_sched, disable_work);
3856 	struct scx_exit_info *ei = sch->exit_info;
3857 	struct scx_task_iter sti;
3858 	struct task_struct *p;
3859 	int kind, cpu;
3860 
3861 	kind = atomic_read(&sch->exit_kind);
3862 	while (true) {
3863 		if (kind == SCX_EXIT_DONE)	/* already disabled? */
3864 			return;
3865 		WARN_ON_ONCE(kind == SCX_EXIT_NONE);
3866 		if (atomic_try_cmpxchg(&sch->exit_kind, &kind, SCX_EXIT_DONE))
3867 			break;
3868 	}
3869 	ei->kind = kind;
3870 	ei->reason = scx_exit_reason(ei->kind);
3871 
3872 	/* guarantee forward progress by bypassing scx_ops */
3873 	scx_bypass(true);
3874 
3875 	switch (scx_set_enable_state(SCX_DISABLING)) {
3876 	case SCX_DISABLING:
3877 		WARN_ONCE(true, "sched_ext: duplicate disabling instance?");
3878 		break;
3879 	case SCX_DISABLED:
3880 		pr_warn("sched_ext: ops error detected without ops (%s)\n",
3881 			sch->exit_info->msg);
3882 		WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING);
3883 		goto done;
3884 	default:
3885 		break;
3886 	}
3887 
3888 	/*
3889 	 * Here, every runnable task is guaranteed to make forward progress and
3890 	 * we can safely use blocking synchronization constructs. Actually
3891 	 * disable ops.
3892 	 */
3893 	mutex_lock(&scx_enable_mutex);
3894 
3895 	static_branch_disable(&__scx_switched_all);
3896 	WRITE_ONCE(scx_switching_all, false);
3897 
3898 	/*
3899 	 * Shut down cgroup support before tasks so that the cgroup attach path
3900 	 * doesn't race against scx_exit_task().
3901 	 */
3902 	scx_cgroup_lock();
3903 	scx_cgroup_exit(sch);
3904 	scx_cgroup_unlock();
3905 
3906 	/*
3907 	 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones
3908 	 * must be switched out and exited synchronously.
3909 	 */
3910 	percpu_down_write(&scx_fork_rwsem);
3911 
3912 	scx_init_task_enabled = false;
3913 
3914 	scx_task_iter_start(&sti);
3915 	while ((p = scx_task_iter_next_locked(&sti))) {
3916 		const struct sched_class *old_class = p->sched_class;
3917 		const struct sched_class *new_class =
3918 			__setscheduler_class(p->policy, p->prio);
3919 		struct sched_enq_and_set_ctx ctx;
3920 
3921 		if (old_class != new_class && p->se.sched_delayed)
3922 			dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
3923 
3924 		sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
3925 
3926 		p->sched_class = new_class;
3927 		check_class_changing(task_rq(p), p, old_class);
3928 
3929 		sched_enq_and_set_task(&ctx);
3930 
3931 		check_class_changed(task_rq(p), p, old_class, p->prio);
3932 		scx_exit_task(p);
3933 	}
3934 	scx_task_iter_stop(&sti);
3935 	percpu_up_write(&scx_fork_rwsem);
3936 
3937 	/*
3938 	 * Invalidate all the rq clocks to prevent getting outdated
3939 	 * rq clocks from a previous scx scheduler.
3940 	 */
3941 	for_each_possible_cpu(cpu) {
3942 		struct rq *rq = cpu_rq(cpu);
3943 		scx_rq_clock_invalidate(rq);
3944 	}
3945 
3946 	/* no task is on scx, turn off all the switches and flush in-progress calls */
3947 	static_branch_disable(&__scx_enabled);
3948 	bitmap_zero(sch->has_op, SCX_OPI_END);
3949 	scx_idle_disable();
3950 	synchronize_rcu();
3951 
3952 	if (ei->kind >= SCX_EXIT_ERROR) {
3953 		pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
3954 		       sch->ops.name, ei->reason);
3955 
3956 		if (ei->msg[0] != '\0')
3957 			pr_err("sched_ext: %s: %s\n", sch->ops.name, ei->msg);
3958 #ifdef CONFIG_STACKTRACE
3959 		stack_trace_print(ei->bt, ei->bt_len, 2);
3960 #endif
3961 	} else {
3962 		pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
3963 			sch->ops.name, ei->reason);
3964 	}
3965 
3966 	if (sch->ops.exit)
3967 		SCX_CALL_OP(sch, SCX_KF_UNLOCKED, exit, NULL, ei);
3968 
3969 	cancel_delayed_work_sync(&scx_watchdog_work);
3970 
3971 	/*
3972 	 * scx_root clearing must be inside cpus_read_lock(). See
3973 	 * handle_hotplug().
3974 	 */
3975 	cpus_read_lock();
3976 	RCU_INIT_POINTER(scx_root, NULL);
3977 	cpus_read_unlock();
3978 
3979 	/*
3980 	 * Delete the kobject from the hierarchy synchronously. Otherwise, sysfs
3981 	 * could observe an object of the same name still in the hierarchy when
3982 	 * the next scheduler is loaded.
3983 	 */
3984 	kobject_del(&sch->kobj);
3985 
3986 	free_percpu(scx_dsp_ctx);
3987 	scx_dsp_ctx = NULL;
3988 	scx_dsp_max_batch = 0;
3989 
3990 	mutex_unlock(&scx_enable_mutex);
3991 
3992 	WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING);
3993 done:
3994 	scx_bypass(false);
3995 }
3996 
scx_disable(enum scx_exit_kind kind)3997 static void scx_disable(enum scx_exit_kind kind)
3998 {
3999 	int none = SCX_EXIT_NONE;
4000 	struct scx_sched *sch;
4001 
4002 	if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
4003 		kind = SCX_EXIT_ERROR;
4004 
4005 	rcu_read_lock();
4006 	sch = rcu_dereference(scx_root);
4007 	if (sch) {
4008 		atomic_try_cmpxchg(&sch->exit_kind, &none, kind);
4009 		kthread_queue_work(sch->helper, &sch->disable_work);
4010 	}
4011 	rcu_read_unlock();
4012 }
4013 
dump_newline(struct seq_buf * s)4014 static void dump_newline(struct seq_buf *s)
4015 {
4016 	trace_sched_ext_dump("");
4017 
4018 	/* @s may be zero sized and seq_buf triggers WARN if so */
4019 	if (s->size)
4020 		seq_buf_putc(s, '\n');
4021 }
4022 
dump_line(struct seq_buf * s,const char * fmt,...)4023 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
4024 {
4025 	va_list args;
4026 
4027 #ifdef CONFIG_TRACEPOINTS
4028 	if (trace_sched_ext_dump_enabled()) {
4029 		/* protected by scx_dump_state()::dump_lock */
4030 		static char line_buf[SCX_EXIT_MSG_LEN];
4031 
4032 		va_start(args, fmt);
4033 		vscnprintf(line_buf, sizeof(line_buf), fmt, args);
4034 		va_end(args);
4035 
4036 		trace_sched_ext_dump(line_buf);
4037 	}
4038 #endif
4039 	/* @s may be zero sized and seq_buf triggers WARN if so */
4040 	if (s->size) {
4041 		va_start(args, fmt);
4042 		seq_buf_vprintf(s, fmt, args);
4043 		va_end(args);
4044 
4045 		seq_buf_putc(s, '\n');
4046 	}
4047 }
4048 
dump_stack_trace(struct seq_buf * s,const char * prefix,const unsigned long * bt,unsigned int len)4049 static void dump_stack_trace(struct seq_buf *s, const char *prefix,
4050 			     const unsigned long *bt, unsigned int len)
4051 {
4052 	unsigned int i;
4053 
4054 	for (i = 0; i < len; i++)
4055 		dump_line(s, "%s%pS", prefix, (void *)bt[i]);
4056 }
4057 
ops_dump_init(struct seq_buf * s,const char * prefix)4058 static void ops_dump_init(struct seq_buf *s, const char *prefix)
4059 {
4060 	struct scx_dump_data *dd = &scx_dump_data;
4061 
4062 	lockdep_assert_irqs_disabled();
4063 
4064 	dd->cpu = smp_processor_id();		/* allow scx_bpf_dump() */
4065 	dd->first = true;
4066 	dd->cursor = 0;
4067 	dd->s = s;
4068 	dd->prefix = prefix;
4069 }
4070 
ops_dump_flush(void)4071 static void ops_dump_flush(void)
4072 {
4073 	struct scx_dump_data *dd = &scx_dump_data;
4074 	char *line = dd->buf.line;
4075 
4076 	if (!dd->cursor)
4077 		return;
4078 
4079 	/*
4080 	 * There's something to flush and this is the first line. Insert a blank
4081 	 * line to distinguish ops dump.
4082 	 */
4083 	if (dd->first) {
4084 		dump_newline(dd->s);
4085 		dd->first = false;
4086 	}
4087 
4088 	/*
4089 	 * There may be multiple lines in $line. Scan and emit each line
4090 	 * separately.
4091 	 */
4092 	while (true) {
4093 		char *end = line;
4094 		char c;
4095 
4096 		while (*end != '\n' && *end != '\0')
4097 			end++;
4098 
4099 		/*
4100 		 * If $line overflowed, it may not have newline at the end.
4101 		 * Always emit with a newline.
4102 		 */
4103 		c = *end;
4104 		*end = '\0';
4105 		dump_line(dd->s, "%s%s", dd->prefix, line);
4106 		if (c == '\0')
4107 			break;
4108 
4109 		/* move to the next line */
4110 		end++;
4111 		if (*end == '\0')
4112 			break;
4113 		line = end;
4114 	}
4115 
4116 	dd->cursor = 0;
4117 }
4118 
ops_dump_exit(void)4119 static void ops_dump_exit(void)
4120 {
4121 	ops_dump_flush();
4122 	scx_dump_data.cpu = -1;
4123 }
4124 
scx_dump_task(struct seq_buf * s,struct scx_dump_ctx * dctx,struct task_struct * p,char marker)4125 static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
4126 			  struct task_struct *p, char marker)
4127 {
4128 	static unsigned long bt[SCX_EXIT_BT_LEN];
4129 	struct scx_sched *sch = scx_root;
4130 	char dsq_id_buf[19] = "(n/a)";
4131 	unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
4132 	unsigned int bt_len = 0;
4133 
4134 	if (p->scx.dsq)
4135 		scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx",
4136 			  (unsigned long long)p->scx.dsq->id);
4137 
4138 	dump_newline(s);
4139 	dump_line(s, " %c%c %s[%d] %+ldms",
4140 		  marker, task_state_to_char(p), p->comm, p->pid,
4141 		  jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
4142 	dump_line(s, "      scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
4143 		  scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
4144 		  p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
4145 		  ops_state >> SCX_OPSS_QSEQ_SHIFT);
4146 	dump_line(s, "      sticky/holding_cpu=%d/%d dsq_id=%s",
4147 		  p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf);
4148 	dump_line(s, "      dsq_vtime=%llu slice=%llu weight=%u",
4149 		  p->scx.dsq_vtime, p->scx.slice, p->scx.weight);
4150 	dump_line(s, "      cpus=%*pb no_mig=%u", cpumask_pr_args(p->cpus_ptr),
4151 		  p->migration_disabled);
4152 
4153 	if (SCX_HAS_OP(sch, dump_task)) {
4154 		ops_dump_init(s, "    ");
4155 		SCX_CALL_OP(sch, SCX_KF_REST, dump_task, NULL, dctx, p);
4156 		ops_dump_exit();
4157 	}
4158 
4159 #ifdef CONFIG_STACKTRACE
4160 	bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1);
4161 #endif
4162 	if (bt_len) {
4163 		dump_newline(s);
4164 		dump_stack_trace(s, "    ", bt, bt_len);
4165 	}
4166 }
4167 
scx_dump_state(struct scx_exit_info * ei,size_t dump_len)4168 static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
4169 {
4170 	static DEFINE_SPINLOCK(dump_lock);
4171 	static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
4172 	struct scx_sched *sch = scx_root;
4173 	struct scx_dump_ctx dctx = {
4174 		.kind = ei->kind,
4175 		.exit_code = ei->exit_code,
4176 		.reason = ei->reason,
4177 		.at_ns = ktime_get_ns(),
4178 		.at_jiffies = jiffies,
4179 	};
4180 	struct seq_buf s;
4181 	struct scx_event_stats events;
4182 	unsigned long flags;
4183 	char *buf;
4184 	int cpu;
4185 
4186 	spin_lock_irqsave(&dump_lock, flags);
4187 
4188 	seq_buf_init(&s, ei->dump, dump_len);
4189 
4190 	if (ei->kind == SCX_EXIT_NONE) {
4191 		dump_line(&s, "Debug dump triggered by %s", ei->reason);
4192 	} else {
4193 		dump_line(&s, "%s[%d] triggered exit kind %d:",
4194 			  current->comm, current->pid, ei->kind);
4195 		dump_line(&s, "  %s (%s)", ei->reason, ei->msg);
4196 		dump_newline(&s);
4197 		dump_line(&s, "Backtrace:");
4198 		dump_stack_trace(&s, "  ", ei->bt, ei->bt_len);
4199 	}
4200 
4201 	if (SCX_HAS_OP(sch, dump)) {
4202 		ops_dump_init(&s, "");
4203 		SCX_CALL_OP(sch, SCX_KF_UNLOCKED, dump, NULL, &dctx);
4204 		ops_dump_exit();
4205 	}
4206 
4207 	dump_newline(&s);
4208 	dump_line(&s, "CPU states");
4209 	dump_line(&s, "----------");
4210 
4211 	for_each_possible_cpu(cpu) {
4212 		struct rq *rq = cpu_rq(cpu);
4213 		struct rq_flags rf;
4214 		struct task_struct *p;
4215 		struct seq_buf ns;
4216 		size_t avail, used;
4217 		bool idle;
4218 
4219 		rq_lock(rq, &rf);
4220 
4221 		idle = list_empty(&rq->scx.runnable_list) &&
4222 			rq->curr->sched_class == &idle_sched_class;
4223 
4224 		if (idle && !SCX_HAS_OP(sch, dump_cpu))
4225 			goto next;
4226 
4227 		/*
4228 		 * We don't yet know whether ops.dump_cpu() will produce output
4229 		 * and we may want to skip the default CPU dump if it doesn't.
4230 		 * Use a nested seq_buf to generate the standard dump so that we
4231 		 * can decide whether to commit later.
4232 		 */
4233 		avail = seq_buf_get_buf(&s, &buf);
4234 		seq_buf_init(&ns, buf, avail);
4235 
4236 		dump_newline(&ns);
4237 		dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu pnt_seq=%lu",
4238 			  cpu, rq->scx.nr_running, rq->scx.flags,
4239 			  rq->scx.cpu_released, rq->scx.ops_qseq,
4240 			  rq->scx.pnt_seq);
4241 		dump_line(&ns, "          curr=%s[%d] class=%ps",
4242 			  rq->curr->comm, rq->curr->pid,
4243 			  rq->curr->sched_class);
4244 		if (!cpumask_empty(rq->scx.cpus_to_kick))
4245 			dump_line(&ns, "  cpus_to_kick   : %*pb",
4246 				  cpumask_pr_args(rq->scx.cpus_to_kick));
4247 		if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
4248 			dump_line(&ns, "  idle_to_kick   : %*pb",
4249 				  cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
4250 		if (!cpumask_empty(rq->scx.cpus_to_preempt))
4251 			dump_line(&ns, "  cpus_to_preempt: %*pb",
4252 				  cpumask_pr_args(rq->scx.cpus_to_preempt));
4253 		if (!cpumask_empty(rq->scx.cpus_to_wait))
4254 			dump_line(&ns, "  cpus_to_wait   : %*pb",
4255 				  cpumask_pr_args(rq->scx.cpus_to_wait));
4256 
4257 		used = seq_buf_used(&ns);
4258 		if (SCX_HAS_OP(sch, dump_cpu)) {
4259 			ops_dump_init(&ns, "  ");
4260 			SCX_CALL_OP(sch, SCX_KF_REST, dump_cpu, NULL,
4261 				    &dctx, cpu, idle);
4262 			ops_dump_exit();
4263 		}
4264 
4265 		/*
4266 		 * If idle && nothing generated by ops.dump_cpu(), there's
4267 		 * nothing interesting. Skip.
4268 		 */
4269 		if (idle && used == seq_buf_used(&ns))
4270 			goto next;
4271 
4272 		/*
4273 		 * $s may already have overflowed when $ns was created. If so,
4274 		 * calling commit on it will trigger BUG.
4275 		 */
4276 		if (avail) {
4277 			seq_buf_commit(&s, seq_buf_used(&ns));
4278 			if (seq_buf_has_overflowed(&ns))
4279 				seq_buf_set_overflow(&s);
4280 		}
4281 
4282 		if (rq->curr->sched_class == &ext_sched_class)
4283 			scx_dump_task(&s, &dctx, rq->curr, '*');
4284 
4285 		list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
4286 			scx_dump_task(&s, &dctx, p, ' ');
4287 	next:
4288 		rq_unlock(rq, &rf);
4289 	}
4290 
4291 	dump_newline(&s);
4292 	dump_line(&s, "Event counters");
4293 	dump_line(&s, "--------------");
4294 
4295 	scx_read_events(sch, &events);
4296 	scx_dump_event(s, &events, SCX_EV_SELECT_CPU_FALLBACK);
4297 	scx_dump_event(s, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
4298 	scx_dump_event(s, &events, SCX_EV_DISPATCH_KEEP_LAST);
4299 	scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_EXITING);
4300 	scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
4301 	scx_dump_event(s, &events, SCX_EV_REFILL_SLICE_DFL);
4302 	scx_dump_event(s, &events, SCX_EV_BYPASS_DURATION);
4303 	scx_dump_event(s, &events, SCX_EV_BYPASS_DISPATCH);
4304 	scx_dump_event(s, &events, SCX_EV_BYPASS_ACTIVATE);
4305 
4306 	if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
4307 		memcpy(ei->dump + dump_len - sizeof(trunc_marker),
4308 		       trunc_marker, sizeof(trunc_marker));
4309 
4310 	spin_unlock_irqrestore(&dump_lock, flags);
4311 }
4312 
scx_error_irq_workfn(struct irq_work * irq_work)4313 static void scx_error_irq_workfn(struct irq_work *irq_work)
4314 {
4315 	struct scx_sched *sch = container_of(irq_work, struct scx_sched, error_irq_work);
4316 	struct scx_exit_info *ei = sch->exit_info;
4317 
4318 	if (ei->kind >= SCX_EXIT_ERROR)
4319 		scx_dump_state(ei, sch->ops.exit_dump_len);
4320 
4321 	kthread_queue_work(sch->helper, &sch->disable_work);
4322 }
4323 
scx_vexit(struct scx_sched * sch,enum scx_exit_kind kind,s64 exit_code,const char * fmt,va_list args)4324 static void scx_vexit(struct scx_sched *sch,
4325 		      enum scx_exit_kind kind, s64 exit_code,
4326 		      const char *fmt, va_list args)
4327 {
4328 	struct scx_exit_info *ei = sch->exit_info;
4329 	int none = SCX_EXIT_NONE;
4330 
4331 	if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind))
4332 		return;
4333 
4334 	ei->exit_code = exit_code;
4335 #ifdef CONFIG_STACKTRACE
4336 	if (kind >= SCX_EXIT_ERROR)
4337 		ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);
4338 #endif
4339 	vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
4340 
4341 	/*
4342 	 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again
4343 	 * in scx_disable_workfn().
4344 	 */
4345 	ei->kind = kind;
4346 	ei->reason = scx_exit_reason(ei->kind);
4347 
4348 	irq_work_queue(&sch->error_irq_work);
4349 }
4350 
scx_alloc_and_add_sched(struct sched_ext_ops * ops)4351 static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
4352 {
4353 	struct scx_sched *sch;
4354 	int node, ret;
4355 
4356 	sch = kzalloc(sizeof(*sch), GFP_KERNEL);
4357 	if (!sch)
4358 		return ERR_PTR(-ENOMEM);
4359 
4360 	sch->exit_info = alloc_exit_info(ops->exit_dump_len);
4361 	if (!sch->exit_info) {
4362 		ret = -ENOMEM;
4363 		goto err_free_sch;
4364 	}
4365 
4366 	ret = rhashtable_init(&sch->dsq_hash, &dsq_hash_params);
4367 	if (ret < 0)
4368 		goto err_free_ei;
4369 
4370 	sch->global_dsqs = kcalloc(nr_node_ids, sizeof(sch->global_dsqs[0]),
4371 				   GFP_KERNEL);
4372 	if (!sch->global_dsqs) {
4373 		ret = -ENOMEM;
4374 		goto err_free_hash;
4375 	}
4376 
4377 	for_each_node_state(node, N_POSSIBLE) {
4378 		struct scx_dispatch_q *dsq;
4379 
4380 		dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node);
4381 		if (!dsq) {
4382 			ret = -ENOMEM;
4383 			goto err_free_gdsqs;
4384 		}
4385 
4386 		init_dsq(dsq, SCX_DSQ_GLOBAL);
4387 		sch->global_dsqs[node] = dsq;
4388 	}
4389 
4390 	sch->pcpu = alloc_percpu(struct scx_sched_pcpu);
4391 	if (!sch->pcpu)
4392 		goto err_free_gdsqs;
4393 
4394 	sch->helper = kthread_run_worker(0, "sched_ext_helper");
4395 	if (!sch->helper)
4396 		goto err_free_pcpu;
4397 	sched_set_fifo(sch->helper->task);
4398 
4399 	atomic_set(&sch->exit_kind, SCX_EXIT_NONE);
4400 	init_irq_work(&sch->error_irq_work, scx_error_irq_workfn);
4401 	kthread_init_work(&sch->disable_work, scx_disable_workfn);
4402 	sch->ops = *ops;
4403 	ops->priv = sch;
4404 
4405 	sch->kobj.kset = scx_kset;
4406 	ret = kobject_init_and_add(&sch->kobj, &scx_ktype, NULL, "root");
4407 	if (ret < 0)
4408 		goto err_stop_helper;
4409 
4410 	return sch;
4411 
4412 err_stop_helper:
4413 	kthread_stop(sch->helper->task);
4414 err_free_pcpu:
4415 	free_percpu(sch->pcpu);
4416 err_free_gdsqs:
4417 	for_each_node_state(node, N_POSSIBLE)
4418 		kfree(sch->global_dsqs[node]);
4419 	kfree(sch->global_dsqs);
4420 err_free_hash:
4421 	rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL);
4422 err_free_ei:
4423 	free_exit_info(sch->exit_info);
4424 err_free_sch:
4425 	kfree(sch);
4426 	return ERR_PTR(ret);
4427 }
4428 
check_hotplug_seq(struct scx_sched * sch,const struct sched_ext_ops * ops)4429 static void check_hotplug_seq(struct scx_sched *sch,
4430 			      const struct sched_ext_ops *ops)
4431 {
4432 	unsigned long long global_hotplug_seq;
4433 
4434 	/*
4435 	 * If a hotplug event has occurred between when a scheduler was
4436 	 * initialized, and when we were able to attach, exit and notify user
4437 	 * space about it.
4438 	 */
4439 	if (ops->hotplug_seq) {
4440 		global_hotplug_seq = atomic_long_read(&scx_hotplug_seq);
4441 		if (ops->hotplug_seq != global_hotplug_seq) {
4442 			scx_exit(sch, SCX_EXIT_UNREG_KERN,
4443 				 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
4444 				 "expected hotplug seq %llu did not match actual %llu",
4445 				 ops->hotplug_seq, global_hotplug_seq);
4446 		}
4447 	}
4448 }
4449 
validate_ops(struct scx_sched * sch,const struct sched_ext_ops * ops)4450 static int validate_ops(struct scx_sched *sch, const struct sched_ext_ops *ops)
4451 {
4452 	/*
4453 	 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the
4454 	 * ops.enqueue() callback isn't implemented.
4455 	 */
4456 	if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {
4457 		scx_error(sch, "SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
4458 		return -EINVAL;
4459 	}
4460 
4461 	/*
4462 	 * SCX_OPS_BUILTIN_IDLE_PER_NODE requires built-in CPU idle
4463 	 * selection policy to be enabled.
4464 	 */
4465 	if ((ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE) &&
4466 	    (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))) {
4467 		scx_error(sch, "SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled");
4468 		return -EINVAL;
4469 	}
4470 
4471 	if (ops->flags & SCX_OPS_HAS_CGROUP_WEIGHT)
4472 		pr_warn("SCX_OPS_HAS_CGROUP_WEIGHT is deprecated and a noop\n");
4473 
4474 	return 0;
4475 }
4476 
scx_enable(struct sched_ext_ops * ops,struct bpf_link * link)4477 static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
4478 {
4479 	struct scx_sched *sch;
4480 	struct scx_task_iter sti;
4481 	struct task_struct *p;
4482 	unsigned long timeout;
4483 	int i, cpu, ret;
4484 
4485 	if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
4486 			   cpu_possible_mask)) {
4487 		pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
4488 		return -EINVAL;
4489 	}
4490 
4491 	mutex_lock(&scx_enable_mutex);
4492 
4493 	if (scx_enable_state() != SCX_DISABLED) {
4494 		ret = -EBUSY;
4495 		goto err_unlock;
4496 	}
4497 
4498 	sch = scx_alloc_and_add_sched(ops);
4499 	if (IS_ERR(sch)) {
4500 		ret = PTR_ERR(sch);
4501 		goto err_unlock;
4502 	}
4503 
4504 	/*
4505 	 * Transition to ENABLING and clear exit info to arm the disable path.
4506 	 * Failure triggers full disabling from here on.
4507 	 */
4508 	WARN_ON_ONCE(scx_set_enable_state(SCX_ENABLING) != SCX_DISABLED);
4509 	WARN_ON_ONCE(scx_root);
4510 
4511 	atomic_long_set(&scx_nr_rejected, 0);
4512 
4513 	for_each_possible_cpu(cpu)
4514 		cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;
4515 
4516 	/*
4517 	 * Keep CPUs stable during enable so that the BPF scheduler can track
4518 	 * online CPUs by watching ->on/offline_cpu() after ->init().
4519 	 */
4520 	cpus_read_lock();
4521 
4522 	/*
4523 	 * Make the scheduler instance visible. Must be inside cpus_read_lock().
4524 	 * See handle_hotplug().
4525 	 */
4526 	rcu_assign_pointer(scx_root, sch);
4527 
4528 	scx_idle_enable(ops);
4529 
4530 	if (sch->ops.init) {
4531 		ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init, NULL);
4532 		if (ret) {
4533 			ret = ops_sanitize_err(sch, "init", ret);
4534 			cpus_read_unlock();
4535 			scx_error(sch, "ops.init() failed (%d)", ret);
4536 			goto err_disable;
4537 		}
4538 		sch->exit_info->flags |= SCX_EFLAG_INITIALIZED;
4539 	}
4540 
4541 	for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
4542 		if (((void (**)(void))ops)[i])
4543 			set_bit(i, sch->has_op);
4544 
4545 	check_hotplug_seq(sch, ops);
4546 	scx_idle_update_selcpu_topology(ops);
4547 
4548 	cpus_read_unlock();
4549 
4550 	ret = validate_ops(sch, ops);
4551 	if (ret)
4552 		goto err_disable;
4553 
4554 	WARN_ON_ONCE(scx_dsp_ctx);
4555 	scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
4556 	scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
4557 						   scx_dsp_max_batch),
4558 				     __alignof__(struct scx_dsp_ctx));
4559 	if (!scx_dsp_ctx) {
4560 		ret = -ENOMEM;
4561 		goto err_disable;
4562 	}
4563 
4564 	if (ops->timeout_ms)
4565 		timeout = msecs_to_jiffies(ops->timeout_ms);
4566 	else
4567 		timeout = SCX_WATCHDOG_MAX_TIMEOUT;
4568 
4569 	WRITE_ONCE(scx_watchdog_timeout, timeout);
4570 	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
4571 	queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
4572 			   scx_watchdog_timeout / 2);
4573 
4574 	/*
4575 	 * Once __scx_enabled is set, %current can be switched to SCX anytime.
4576 	 * This can lead to stalls as some BPF schedulers (e.g. userspace
4577 	 * scheduling) may not function correctly before all tasks are switched.
4578 	 * Init in bypass mode to guarantee forward progress.
4579 	 */
4580 	scx_bypass(true);
4581 
4582 	for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
4583 		if (((void (**)(void))ops)[i])
4584 			set_bit(i, sch->has_op);
4585 
4586 	if (sch->ops.cpu_acquire || sch->ops.cpu_release)
4587 		sch->ops.flags |= SCX_OPS_HAS_CPU_PREEMPT;
4588 
4589 	/*
4590 	 * Lock out forks, cgroup on/offlining and moves before opening the
4591 	 * floodgate so that they don't wander into the operations prematurely.
4592 	 */
4593 	percpu_down_write(&scx_fork_rwsem);
4594 
4595 	WARN_ON_ONCE(scx_init_task_enabled);
4596 	scx_init_task_enabled = true;
4597 
4598 	/*
4599 	 * Enable ops for every task. Fork is excluded by scx_fork_rwsem
4600 	 * preventing new tasks from being added. No need to exclude tasks
4601 	 * leaving as sched_ext_free() can handle both prepped and enabled
4602 	 * tasks. Prep all tasks first and then enable them with preemption
4603 	 * disabled.
4604 	 *
4605 	 * All cgroups should be initialized before scx_init_task() so that the
4606 	 * BPF scheduler can reliably track each task's cgroup membership from
4607 	 * scx_init_task(). Lock out cgroup on/offlining and task migrations
4608 	 * while tasks are being initialized so that scx_cgroup_can_attach()
4609 	 * never sees uninitialized tasks.
4610 	 */
4611 	scx_cgroup_lock();
4612 	ret = scx_cgroup_init(sch);
4613 	if (ret)
4614 		goto err_disable_unlock_all;
4615 
4616 	scx_task_iter_start(&sti);
4617 	while ((p = scx_task_iter_next_locked(&sti))) {
4618 		/*
4619 		 * @p may already be dead, have lost all its usages counts and
4620 		 * be waiting for RCU grace period before being freed. @p can't
4621 		 * be initialized for SCX in such cases and should be ignored.
4622 		 */
4623 		if (!tryget_task_struct(p))
4624 			continue;
4625 
4626 		scx_task_iter_unlock(&sti);
4627 
4628 		ret = scx_init_task(p, task_group(p), false);
4629 		if (ret) {
4630 			put_task_struct(p);
4631 			scx_task_iter_stop(&sti);
4632 			scx_error(sch, "ops.init_task() failed (%d) for %s[%d]",
4633 				  ret, p->comm, p->pid);
4634 			goto err_disable_unlock_all;
4635 		}
4636 
4637 		scx_set_task_state(p, SCX_TASK_READY);
4638 
4639 		put_task_struct(p);
4640 	}
4641 	scx_task_iter_stop(&sti);
4642 	scx_cgroup_unlock();
4643 	percpu_up_write(&scx_fork_rwsem);
4644 
4645 	/*
4646 	 * All tasks are READY. It's safe to turn on scx_enabled() and switch
4647 	 * all eligible tasks.
4648 	 */
4649 	WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
4650 	static_branch_enable(&__scx_enabled);
4651 
4652 	/*
4653 	 * We're fully committed and can't fail. The task READY -> ENABLED
4654 	 * transitions here are synchronized against sched_ext_free() through
4655 	 * scx_tasks_lock.
4656 	 */
4657 	percpu_down_write(&scx_fork_rwsem);
4658 	scx_task_iter_start(&sti);
4659 	while ((p = scx_task_iter_next_locked(&sti))) {
4660 		const struct sched_class *old_class = p->sched_class;
4661 		const struct sched_class *new_class =
4662 			__setscheduler_class(p->policy, p->prio);
4663 		struct sched_enq_and_set_ctx ctx;
4664 
4665 		if (!tryget_task_struct(p))
4666 			continue;
4667 
4668 		if (old_class != new_class && p->se.sched_delayed)
4669 			dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
4670 
4671 		sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
4672 
4673 		p->scx.slice = SCX_SLICE_DFL;
4674 		p->sched_class = new_class;
4675 		check_class_changing(task_rq(p), p, old_class);
4676 
4677 		sched_enq_and_set_task(&ctx);
4678 
4679 		check_class_changed(task_rq(p), p, old_class, p->prio);
4680 		put_task_struct(p);
4681 	}
4682 	scx_task_iter_stop(&sti);
4683 	percpu_up_write(&scx_fork_rwsem);
4684 
4685 	scx_bypass(false);
4686 
4687 	if (!scx_tryset_enable_state(SCX_ENABLED, SCX_ENABLING)) {
4688 		WARN_ON_ONCE(atomic_read(&sch->exit_kind) == SCX_EXIT_NONE);
4689 		goto err_disable;
4690 	}
4691 
4692 	if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
4693 		static_branch_enable(&__scx_switched_all);
4694 
4695 	pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
4696 		sch->ops.name, scx_switched_all() ? "" : " (partial)");
4697 	kobject_uevent(&sch->kobj, KOBJ_ADD);
4698 	mutex_unlock(&scx_enable_mutex);
4699 
4700 	atomic_long_inc(&scx_enable_seq);
4701 
4702 	return 0;
4703 
4704 err_unlock:
4705 	mutex_unlock(&scx_enable_mutex);
4706 	return ret;
4707 
4708 err_disable_unlock_all:
4709 	scx_cgroup_unlock();
4710 	percpu_up_write(&scx_fork_rwsem);
4711 	/* we'll soon enter disable path, keep bypass on */
4712 err_disable:
4713 	mutex_unlock(&scx_enable_mutex);
4714 	/*
4715 	 * Returning an error code here would not pass all the error information
4716 	 * to userspace. Record errno using scx_error() for cases scx_error()
4717 	 * wasn't already invoked and exit indicating success so that the error
4718 	 * is notified through ops.exit() with all the details.
4719 	 *
4720 	 * Flush scx_disable_work to ensure that error is reported before init
4721 	 * completion. sch's base reference will be put by bpf_scx_unreg().
4722 	 */
4723 	scx_error(sch, "scx_enable() failed (%d)", ret);
4724 	kthread_flush_work(&sch->disable_work);
4725 	return 0;
4726 }
4727 
4728 
4729 /********************************************************************************
4730  * bpf_struct_ops plumbing.
4731  */
4732 #include <linux/bpf_verifier.h>
4733 #include <linux/bpf.h>
4734 #include <linux/btf.h>
4735 
4736 static const struct btf_type *task_struct_type;
4737 
bpf_scx_is_valid_access(int off,int size,enum bpf_access_type type,const struct bpf_prog * prog,struct bpf_insn_access_aux * info)4738 static bool bpf_scx_is_valid_access(int off, int size,
4739 				    enum bpf_access_type type,
4740 				    const struct bpf_prog *prog,
4741 				    struct bpf_insn_access_aux *info)
4742 {
4743 	if (type != BPF_READ)
4744 		return false;
4745 	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
4746 		return false;
4747 	if (off % size != 0)
4748 		return false;
4749 
4750 	return btf_ctx_access(off, size, type, prog, info);
4751 }
4752 
bpf_scx_btf_struct_access(struct bpf_verifier_log * log,const struct bpf_reg_state * reg,int off,int size)4753 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
4754 				     const struct bpf_reg_state *reg, int off,
4755 				     int size)
4756 {
4757 	const struct btf_type *t;
4758 
4759 	t = btf_type_by_id(reg->btf, reg->btf_id);
4760 	if (t == task_struct_type) {
4761 		if (off >= offsetof(struct task_struct, scx.slice) &&
4762 		    off + size <= offsetofend(struct task_struct, scx.slice))
4763 			return SCALAR_VALUE;
4764 		if (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
4765 		    off + size <= offsetofend(struct task_struct, scx.dsq_vtime))
4766 			return SCALAR_VALUE;
4767 		if (off >= offsetof(struct task_struct, scx.disallow) &&
4768 		    off + size <= offsetofend(struct task_struct, scx.disallow))
4769 			return SCALAR_VALUE;
4770 	}
4771 
4772 	return -EACCES;
4773 }
4774 
4775 static const struct bpf_verifier_ops bpf_scx_verifier_ops = {
4776 	.get_func_proto = bpf_base_func_proto,
4777 	.is_valid_access = bpf_scx_is_valid_access,
4778 	.btf_struct_access = bpf_scx_btf_struct_access,
4779 };
4780 
bpf_scx_init_member(const struct btf_type * t,const struct btf_member * member,void * kdata,const void * udata)4781 static int bpf_scx_init_member(const struct btf_type *t,
4782 			       const struct btf_member *member,
4783 			       void *kdata, const void *udata)
4784 {
4785 	const struct sched_ext_ops *uops = udata;
4786 	struct sched_ext_ops *ops = kdata;
4787 	u32 moff = __btf_member_bit_offset(t, member) / 8;
4788 	int ret;
4789 
4790 	switch (moff) {
4791 	case offsetof(struct sched_ext_ops, dispatch_max_batch):
4792 		if (*(u32 *)(udata + moff) > INT_MAX)
4793 			return -E2BIG;
4794 		ops->dispatch_max_batch = *(u32 *)(udata + moff);
4795 		return 1;
4796 	case offsetof(struct sched_ext_ops, flags):
4797 		if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS)
4798 			return -EINVAL;
4799 		ops->flags = *(u64 *)(udata + moff);
4800 		return 1;
4801 	case offsetof(struct sched_ext_ops, name):
4802 		ret = bpf_obj_name_cpy(ops->name, uops->name,
4803 				       sizeof(ops->name));
4804 		if (ret < 0)
4805 			return ret;
4806 		if (ret == 0)
4807 			return -EINVAL;
4808 		return 1;
4809 	case offsetof(struct sched_ext_ops, timeout_ms):
4810 		if (msecs_to_jiffies(*(u32 *)(udata + moff)) >
4811 		    SCX_WATCHDOG_MAX_TIMEOUT)
4812 			return -E2BIG;
4813 		ops->timeout_ms = *(u32 *)(udata + moff);
4814 		return 1;
4815 	case offsetof(struct sched_ext_ops, exit_dump_len):
4816 		ops->exit_dump_len =
4817 			*(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN;
4818 		return 1;
4819 	case offsetof(struct sched_ext_ops, hotplug_seq):
4820 		ops->hotplug_seq = *(u64 *)(udata + moff);
4821 		return 1;
4822 	}
4823 
4824 	return 0;
4825 }
4826 
bpf_scx_check_member(const struct btf_type * t,const struct btf_member * member,const struct bpf_prog * prog)4827 static int bpf_scx_check_member(const struct btf_type *t,
4828 				const struct btf_member *member,
4829 				const struct bpf_prog *prog)
4830 {
4831 	u32 moff = __btf_member_bit_offset(t, member) / 8;
4832 
4833 	switch (moff) {
4834 	case offsetof(struct sched_ext_ops, init_task):
4835 #ifdef CONFIG_EXT_GROUP_SCHED
4836 	case offsetof(struct sched_ext_ops, cgroup_init):
4837 	case offsetof(struct sched_ext_ops, cgroup_exit):
4838 	case offsetof(struct sched_ext_ops, cgroup_prep_move):
4839 #endif
4840 	case offsetof(struct sched_ext_ops, cpu_online):
4841 	case offsetof(struct sched_ext_ops, cpu_offline):
4842 	case offsetof(struct sched_ext_ops, init):
4843 	case offsetof(struct sched_ext_ops, exit):
4844 		break;
4845 	default:
4846 		if (prog->sleepable)
4847 			return -EINVAL;
4848 	}
4849 
4850 	return 0;
4851 }
4852 
bpf_scx_reg(void * kdata,struct bpf_link * link)4853 static int bpf_scx_reg(void *kdata, struct bpf_link *link)
4854 {
4855 	return scx_enable(kdata, link);
4856 }
4857 
bpf_scx_unreg(void * kdata,struct bpf_link * link)4858 static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
4859 {
4860 	struct sched_ext_ops *ops = kdata;
4861 	struct scx_sched *sch = ops->priv;
4862 
4863 	scx_disable(SCX_EXIT_UNREG);
4864 	kthread_flush_work(&sch->disable_work);
4865 	kobject_put(&sch->kobj);
4866 }
4867 
bpf_scx_init(struct btf * btf)4868 static int bpf_scx_init(struct btf *btf)
4869 {
4870 	task_struct_type = btf_type_by_id(btf, btf_tracing_ids[BTF_TRACING_TYPE_TASK]);
4871 
4872 	return 0;
4873 }
4874 
bpf_scx_update(void * kdata,void * old_kdata,struct bpf_link * link)4875 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)
4876 {
4877 	/*
4878 	 * sched_ext does not support updating the actively-loaded BPF
4879 	 * scheduler, as registering a BPF scheduler can always fail if the
4880 	 * scheduler returns an error code for e.g. ops.init(), ops.init_task(),
4881 	 * etc. Similarly, we can always race with unregistration happening
4882 	 * elsewhere, such as with sysrq.
4883 	 */
4884 	return -EOPNOTSUPP;
4885 }
4886 
bpf_scx_validate(void * kdata)4887 static int bpf_scx_validate(void *kdata)
4888 {
4889 	return 0;
4890 }
4891 
sched_ext_ops__select_cpu(struct task_struct * p,s32 prev_cpu,u64 wake_flags)4892 static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
sched_ext_ops__enqueue(struct task_struct * p,u64 enq_flags)4893 static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {}
sched_ext_ops__dequeue(struct task_struct * p,u64 enq_flags)4894 static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {}
sched_ext_ops__dispatch(s32 prev_cpu,struct task_struct * prev__nullable)4895 static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *prev__nullable) {}
sched_ext_ops__tick(struct task_struct * p)4896 static void sched_ext_ops__tick(struct task_struct *p) {}
sched_ext_ops__runnable(struct task_struct * p,u64 enq_flags)4897 static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {}
sched_ext_ops__running(struct task_struct * p)4898 static void sched_ext_ops__running(struct task_struct *p) {}
sched_ext_ops__stopping(struct task_struct * p,bool runnable)4899 static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {}
sched_ext_ops__quiescent(struct task_struct * p,u64 deq_flags)4900 static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {}
sched_ext_ops__yield(struct task_struct * from,struct task_struct * to__nullable)4901 static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to__nullable) { return false; }
sched_ext_ops__core_sched_before(struct task_struct * a,struct task_struct * b)4902 static bool sched_ext_ops__core_sched_before(struct task_struct *a, struct task_struct *b) { return false; }
sched_ext_ops__set_weight(struct task_struct * p,u32 weight)4903 static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {}
sched_ext_ops__set_cpumask(struct task_struct * p,const struct cpumask * mask)4904 static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {}
sched_ext_ops__update_idle(s32 cpu,bool idle)4905 static void sched_ext_ops__update_idle(s32 cpu, bool idle) {}
sched_ext_ops__cpu_acquire(s32 cpu,struct scx_cpu_acquire_args * args)4906 static void sched_ext_ops__cpu_acquire(s32 cpu, struct scx_cpu_acquire_args *args) {}
sched_ext_ops__cpu_release(s32 cpu,struct scx_cpu_release_args * args)4907 static void sched_ext_ops__cpu_release(s32 cpu, struct scx_cpu_release_args *args) {}
sched_ext_ops__init_task(struct task_struct * p,struct scx_init_task_args * args)4908 static s32 sched_ext_ops__init_task(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
sched_ext_ops__exit_task(struct task_struct * p,struct scx_exit_task_args * args)4909 static void sched_ext_ops__exit_task(struct task_struct *p, struct scx_exit_task_args *args) {}
sched_ext_ops__enable(struct task_struct * p)4910 static void sched_ext_ops__enable(struct task_struct *p) {}
sched_ext_ops__disable(struct task_struct * p)4911 static void sched_ext_ops__disable(struct task_struct *p) {}
4912 #ifdef CONFIG_EXT_GROUP_SCHED
sched_ext_ops__cgroup_init(struct cgroup * cgrp,struct scx_cgroup_init_args * args)4913 static s32 sched_ext_ops__cgroup_init(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
sched_ext_ops__cgroup_exit(struct cgroup * cgrp)4914 static void sched_ext_ops__cgroup_exit(struct cgroup *cgrp) {}
sched_ext_ops__cgroup_prep_move(struct task_struct * p,struct cgroup * from,struct cgroup * to)4915 static s32 sched_ext_ops__cgroup_prep_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
sched_ext_ops__cgroup_move(struct task_struct * p,struct cgroup * from,struct cgroup * to)4916 static void sched_ext_ops__cgroup_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
sched_ext_ops__cgroup_cancel_move(struct task_struct * p,struct cgroup * from,struct cgroup * to)4917 static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
sched_ext_ops__cgroup_set_weight(struct cgroup * cgrp,u32 weight)4918 static void sched_ext_ops__cgroup_set_weight(struct cgroup *cgrp, u32 weight) {}
sched_ext_ops__cgroup_set_bandwidth(struct cgroup * cgrp,u64 period_us,u64 quota_us,u64 burst_us)4919 static void sched_ext_ops__cgroup_set_bandwidth(struct cgroup *cgrp, u64 period_us, u64 quota_us, u64 burst_us) {}
4920 #endif
sched_ext_ops__cpu_online(s32 cpu)4921 static void sched_ext_ops__cpu_online(s32 cpu) {}
sched_ext_ops__cpu_offline(s32 cpu)4922 static void sched_ext_ops__cpu_offline(s32 cpu) {}
sched_ext_ops__init(void)4923 static s32 sched_ext_ops__init(void) { return -EINVAL; }
sched_ext_ops__exit(struct scx_exit_info * info)4924 static void sched_ext_ops__exit(struct scx_exit_info *info) {}
sched_ext_ops__dump(struct scx_dump_ctx * ctx)4925 static void sched_ext_ops__dump(struct scx_dump_ctx *ctx) {}
sched_ext_ops__dump_cpu(struct scx_dump_ctx * ctx,s32 cpu,bool idle)4926 static void sched_ext_ops__dump_cpu(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
sched_ext_ops__dump_task(struct scx_dump_ctx * ctx,struct task_struct * p)4927 static void sched_ext_ops__dump_task(struct scx_dump_ctx *ctx, struct task_struct *p) {}
4928 
4929 static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
4930 	.select_cpu		= sched_ext_ops__select_cpu,
4931 	.enqueue		= sched_ext_ops__enqueue,
4932 	.dequeue		= sched_ext_ops__dequeue,
4933 	.dispatch		= sched_ext_ops__dispatch,
4934 	.tick			= sched_ext_ops__tick,
4935 	.runnable		= sched_ext_ops__runnable,
4936 	.running		= sched_ext_ops__running,
4937 	.stopping		= sched_ext_ops__stopping,
4938 	.quiescent		= sched_ext_ops__quiescent,
4939 	.yield			= sched_ext_ops__yield,
4940 	.core_sched_before	= sched_ext_ops__core_sched_before,
4941 	.set_weight		= sched_ext_ops__set_weight,
4942 	.set_cpumask		= sched_ext_ops__set_cpumask,
4943 	.update_idle		= sched_ext_ops__update_idle,
4944 	.cpu_acquire		= sched_ext_ops__cpu_acquire,
4945 	.cpu_release		= sched_ext_ops__cpu_release,
4946 	.init_task		= sched_ext_ops__init_task,
4947 	.exit_task		= sched_ext_ops__exit_task,
4948 	.enable			= sched_ext_ops__enable,
4949 	.disable		= sched_ext_ops__disable,
4950 #ifdef CONFIG_EXT_GROUP_SCHED
4951 	.cgroup_init		= sched_ext_ops__cgroup_init,
4952 	.cgroup_exit		= sched_ext_ops__cgroup_exit,
4953 	.cgroup_prep_move	= sched_ext_ops__cgroup_prep_move,
4954 	.cgroup_move		= sched_ext_ops__cgroup_move,
4955 	.cgroup_cancel_move	= sched_ext_ops__cgroup_cancel_move,
4956 	.cgroup_set_weight	= sched_ext_ops__cgroup_set_weight,
4957 	.cgroup_set_bandwidth	= sched_ext_ops__cgroup_set_bandwidth,
4958 #endif
4959 	.cpu_online		= sched_ext_ops__cpu_online,
4960 	.cpu_offline		= sched_ext_ops__cpu_offline,
4961 	.init			= sched_ext_ops__init,
4962 	.exit			= sched_ext_ops__exit,
4963 	.dump			= sched_ext_ops__dump,
4964 	.dump_cpu		= sched_ext_ops__dump_cpu,
4965 	.dump_task		= sched_ext_ops__dump_task,
4966 };
4967 
4968 static struct bpf_struct_ops bpf_sched_ext_ops = {
4969 	.verifier_ops = &bpf_scx_verifier_ops,
4970 	.reg = bpf_scx_reg,
4971 	.unreg = bpf_scx_unreg,
4972 	.check_member = bpf_scx_check_member,
4973 	.init_member = bpf_scx_init_member,
4974 	.init = bpf_scx_init,
4975 	.update = bpf_scx_update,
4976 	.validate = bpf_scx_validate,
4977 	.name = "sched_ext_ops",
4978 	.owner = THIS_MODULE,
4979 	.cfi_stubs = &__bpf_ops_sched_ext_ops
4980 };
4981 
4982 
4983 /********************************************************************************
4984  * System integration and init.
4985  */
4986 
sysrq_handle_sched_ext_reset(u8 key)4987 static void sysrq_handle_sched_ext_reset(u8 key)
4988 {
4989 	scx_disable(SCX_EXIT_SYSRQ);
4990 }
4991 
4992 static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
4993 	.handler	= sysrq_handle_sched_ext_reset,
4994 	.help_msg	= "reset-sched-ext(S)",
4995 	.action_msg	= "Disable sched_ext and revert all tasks to CFS",
4996 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
4997 };
4998 
sysrq_handle_sched_ext_dump(u8 key)4999 static void sysrq_handle_sched_ext_dump(u8 key)
5000 {
5001 	struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" };
5002 
5003 	if (scx_enabled())
5004 		scx_dump_state(&ei, 0);
5005 }
5006 
5007 static const struct sysrq_key_op sysrq_sched_ext_dump_op = {
5008 	.handler	= sysrq_handle_sched_ext_dump,
5009 	.help_msg	= "dump-sched-ext(D)",
5010 	.action_msg	= "Trigger sched_ext debug dump",
5011 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
5012 };
5013 
can_skip_idle_kick(struct rq * rq)5014 static bool can_skip_idle_kick(struct rq *rq)
5015 {
5016 	lockdep_assert_rq_held(rq);
5017 
5018 	/*
5019 	 * We can skip idle kicking if @rq is going to go through at least one
5020 	 * full SCX scheduling cycle before going idle. Just checking whether
5021 	 * curr is not idle is insufficient because we could be racing
5022 	 * balance_one() trying to pull the next task from a remote rq, which
5023 	 * may fail, and @rq may become idle afterwards.
5024 	 *
5025 	 * The race window is small and we don't and can't guarantee that @rq is
5026 	 * only kicked while idle anyway. Skip only when sure.
5027 	 */
5028 	return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
5029 }
5030 
kick_one_cpu(s32 cpu,struct rq * this_rq,unsigned long * pseqs)5031 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs)
5032 {
5033 	struct rq *rq = cpu_rq(cpu);
5034 	struct scx_rq *this_scx = &this_rq->scx;
5035 	bool should_wait = false;
5036 	unsigned long flags;
5037 
5038 	raw_spin_rq_lock_irqsave(rq, flags);
5039 
5040 	/*
5041 	 * During CPU hotplug, a CPU may depend on kicking itself to make
5042 	 * forward progress. Allow kicking self regardless of online state.
5043 	 */
5044 	if (cpu_online(cpu) || cpu == cpu_of(this_rq)) {
5045 		if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
5046 			if (rq->curr->sched_class == &ext_sched_class)
5047 				rq->curr->scx.slice = 0;
5048 			cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
5049 		}
5050 
5051 		if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
5052 			pseqs[cpu] = rq->scx.pnt_seq;
5053 			should_wait = true;
5054 		}
5055 
5056 		resched_curr(rq);
5057 	} else {
5058 		cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
5059 		cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
5060 	}
5061 
5062 	raw_spin_rq_unlock_irqrestore(rq, flags);
5063 
5064 	return should_wait;
5065 }
5066 
kick_one_cpu_if_idle(s32 cpu,struct rq * this_rq)5067 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
5068 {
5069 	struct rq *rq = cpu_rq(cpu);
5070 	unsigned long flags;
5071 
5072 	raw_spin_rq_lock_irqsave(rq, flags);
5073 
5074 	if (!can_skip_idle_kick(rq) &&
5075 	    (cpu_online(cpu) || cpu == cpu_of(this_rq)))
5076 		resched_curr(rq);
5077 
5078 	raw_spin_rq_unlock_irqrestore(rq, flags);
5079 }
5080 
kick_cpus_irq_workfn(struct irq_work * irq_work)5081 static void kick_cpus_irq_workfn(struct irq_work *irq_work)
5082 {
5083 	struct rq *this_rq = this_rq();
5084 	struct scx_rq *this_scx = &this_rq->scx;
5085 	unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs);
5086 	bool should_wait = false;
5087 	s32 cpu;
5088 
5089 	for_each_cpu(cpu, this_scx->cpus_to_kick) {
5090 		should_wait |= kick_one_cpu(cpu, this_rq, pseqs);
5091 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
5092 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
5093 	}
5094 
5095 	for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
5096 		kick_one_cpu_if_idle(cpu, this_rq);
5097 		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
5098 	}
5099 
5100 	if (!should_wait)
5101 		return;
5102 
5103 	for_each_cpu(cpu, this_scx->cpus_to_wait) {
5104 		unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq;
5105 
5106 		if (cpu != cpu_of(this_rq)) {
5107 			/*
5108 			 * Pairs with smp_store_release() issued by this CPU in
5109 			 * switch_class() on the resched path.
5110 			 *
5111 			 * We busy-wait here to guarantee that no other task can
5112 			 * be scheduled on our core before the target CPU has
5113 			 * entered the resched path.
5114 			 */
5115 			while (smp_load_acquire(wait_pnt_seq) == pseqs[cpu])
5116 				cpu_relax();
5117 		}
5118 
5119 		cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
5120 	}
5121 }
5122 
5123 /**
5124  * print_scx_info - print out sched_ext scheduler state
5125  * @log_lvl: the log level to use when printing
5126  * @p: target task
5127  *
5128  * If a sched_ext scheduler is enabled, print the name and state of the
5129  * scheduler. If @p is on sched_ext, print further information about the task.
5130  *
5131  * This function can be safely called on any task as long as the task_struct
5132  * itself is accessible. While safe, this function isn't synchronized and may
5133  * print out mixups or garbages of limited length.
5134  */
print_scx_info(const char * log_lvl,struct task_struct * p)5135 void print_scx_info(const char *log_lvl, struct task_struct *p)
5136 {
5137 	struct scx_sched *sch = scx_root;
5138 	enum scx_enable_state state = scx_enable_state();
5139 	const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
5140 	char runnable_at_buf[22] = "?";
5141 	struct sched_class *class;
5142 	unsigned long runnable_at;
5143 
5144 	if (state == SCX_DISABLED)
5145 		return;
5146 
5147 	/*
5148 	 * Carefully check if the task was running on sched_ext, and then
5149 	 * carefully copy the time it's been runnable, and its state.
5150 	 */
5151 	if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
5152 	    class != &ext_sched_class) {
5153 		printk("%sSched_ext: %s (%s%s)", log_lvl, sch->ops.name,
5154 		       scx_enable_state_str[state], all);
5155 		return;
5156 	}
5157 
5158 	if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
5159 				      sizeof(runnable_at)))
5160 		scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
5161 			  jiffies_delta_msecs(runnable_at, jiffies));
5162 
5163 	/* print everything onto one line to conserve console space */
5164 	printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
5165 	       log_lvl, sch->ops.name, scx_enable_state_str[state], all,
5166 	       runnable_at_buf);
5167 }
5168 
scx_pm_handler(struct notifier_block * nb,unsigned long event,void * ptr)5169 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
5170 {
5171 	/*
5172 	 * SCX schedulers often have userspace components which are sometimes
5173 	 * involved in critial scheduling paths. PM operations involve freezing
5174 	 * userspace which can lead to scheduling misbehaviors including stalls.
5175 	 * Let's bypass while PM operations are in progress.
5176 	 */
5177 	switch (event) {
5178 	case PM_HIBERNATION_PREPARE:
5179 	case PM_SUSPEND_PREPARE:
5180 	case PM_RESTORE_PREPARE:
5181 		scx_bypass(true);
5182 		break;
5183 	case PM_POST_HIBERNATION:
5184 	case PM_POST_SUSPEND:
5185 	case PM_POST_RESTORE:
5186 		scx_bypass(false);
5187 		break;
5188 	}
5189 
5190 	return NOTIFY_OK;
5191 }
5192 
5193 static struct notifier_block scx_pm_notifier = {
5194 	.notifier_call = scx_pm_handler,
5195 };
5196 
init_sched_ext_class(void)5197 void __init init_sched_ext_class(void)
5198 {
5199 	s32 cpu, v;
5200 
5201 	/*
5202 	 * The following is to prevent the compiler from optimizing out the enum
5203 	 * definitions so that BPF scheduler implementations can use them
5204 	 * through the generated vmlinux.h.
5205 	 */
5206 	WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
5207 		   SCX_TG_ONLINE);
5208 
5209 	scx_idle_init_masks();
5210 
5211 	scx_kick_cpus_pnt_seqs =
5212 		__alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids,
5213 			       __alignof__(scx_kick_cpus_pnt_seqs[0]));
5214 	BUG_ON(!scx_kick_cpus_pnt_seqs);
5215 
5216 	for_each_possible_cpu(cpu) {
5217 		struct rq *rq = cpu_rq(cpu);
5218 		int  n = cpu_to_node(cpu);
5219 
5220 		init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
5221 		INIT_LIST_HEAD(&rq->scx.runnable_list);
5222 		INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
5223 
5224 		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick, GFP_KERNEL, n));
5225 		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n));
5226 		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n));
5227 		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n));
5228 		init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn);
5229 		init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
5230 
5231 		if (cpu_online(cpu))
5232 			cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
5233 	}
5234 
5235 	register_sysrq_key('S', &sysrq_sched_ext_reset_op);
5236 	register_sysrq_key('D', &sysrq_sched_ext_dump_op);
5237 	INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
5238 }
5239 
5240 
5241 /********************************************************************************
5242  * Helpers that can be called from the BPF scheduler.
5243  */
scx_dsq_insert_preamble(struct scx_sched * sch,struct task_struct * p,u64 enq_flags)5244 static bool scx_dsq_insert_preamble(struct scx_sched *sch, struct task_struct *p,
5245 				    u64 enq_flags)
5246 {
5247 	if (!scx_kf_allowed(sch, SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
5248 		return false;
5249 
5250 	lockdep_assert_irqs_disabled();
5251 
5252 	if (unlikely(!p)) {
5253 		scx_error(sch, "called with NULL task");
5254 		return false;
5255 	}
5256 
5257 	if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
5258 		scx_error(sch, "invalid enq_flags 0x%llx", enq_flags);
5259 		return false;
5260 	}
5261 
5262 	return true;
5263 }
5264 
scx_dsq_insert_commit(struct scx_sched * sch,struct task_struct * p,u64 dsq_id,u64 enq_flags)5265 static void scx_dsq_insert_commit(struct scx_sched *sch, struct task_struct *p,
5266 				  u64 dsq_id, u64 enq_flags)
5267 {
5268 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
5269 	struct task_struct *ddsp_task;
5270 
5271 	ddsp_task = __this_cpu_read(direct_dispatch_task);
5272 	if (ddsp_task) {
5273 		mark_direct_dispatch(sch, ddsp_task, p, dsq_id, enq_flags);
5274 		return;
5275 	}
5276 
5277 	if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
5278 		scx_error(sch, "dispatch buffer overflow");
5279 		return;
5280 	}
5281 
5282 	dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
5283 		.task = p,
5284 		.qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
5285 		.dsq_id = dsq_id,
5286 		.enq_flags = enq_flags,
5287 	};
5288 }
5289 
5290 __bpf_kfunc_start_defs();
5291 
5292 /**
5293  * scx_bpf_dsq_insert - Insert a task into the FIFO queue of a DSQ
5294  * @p: task_struct to insert
5295  * @dsq_id: DSQ to insert into
5296  * @slice: duration @p can run for in nsecs, 0 to keep the current value
5297  * @enq_flags: SCX_ENQ_*
5298  *
5299  * Insert @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe to
5300  * call this function spuriously. Can be called from ops.enqueue(),
5301  * ops.select_cpu(), and ops.dispatch().
5302  *
5303  * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
5304  * and @p must match the task being enqueued.
5305  *
5306  * When called from ops.select_cpu(), @enq_flags and @dsp_id are stored, and @p
5307  * will be directly inserted into the corresponding dispatch queue after
5308  * ops.select_cpu() returns. If @p is inserted into SCX_DSQ_LOCAL, it will be
5309  * inserted into the local DSQ of the CPU returned by ops.select_cpu().
5310  * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
5311  * task is inserted.
5312  *
5313  * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
5314  * and this function can be called upto ops.dispatch_max_batch times to insert
5315  * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of the
5316  * remaining slots. scx_bpf_dsq_move_to_local() flushes the batch and resets the
5317  * counter.
5318  *
5319  * This function doesn't have any locking restrictions and may be called under
5320  * BPF locks (in the future when BPF introduces more flexible locking).
5321  *
5322  * @p is allowed to run for @slice. The scheduling path is triggered on slice
5323  * exhaustion. If zero, the current residual slice is maintained. If
5324  * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
5325  * scx_bpf_kick_cpu() to trigger scheduling.
5326  */
scx_bpf_dsq_insert(struct task_struct * p,u64 dsq_id,u64 slice,u64 enq_flags)5327 __bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice,
5328 				    u64 enq_flags)
5329 {
5330 	struct scx_sched *sch;
5331 
5332 	guard(rcu)();
5333 	sch = rcu_dereference(scx_root);
5334 	if (unlikely(!sch))
5335 		return;
5336 
5337 	if (!scx_dsq_insert_preamble(sch, p, enq_flags))
5338 		return;
5339 
5340 	if (slice)
5341 		p->scx.slice = slice;
5342 	else
5343 		p->scx.slice = p->scx.slice ?: 1;
5344 
5345 	scx_dsq_insert_commit(sch, p, dsq_id, enq_flags);
5346 }
5347 
5348 /**
5349  * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ
5350  * @p: task_struct to insert
5351  * @dsq_id: DSQ to insert into
5352  * @slice: duration @p can run for in nsecs, 0 to keep the current value
5353  * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
5354  * @enq_flags: SCX_ENQ_*
5355  *
5356  * Insert @p into the vtime priority queue of the DSQ identified by @dsq_id.
5357  * Tasks queued into the priority queue are ordered by @vtime. All other aspects
5358  * are identical to scx_bpf_dsq_insert().
5359  *
5360  * @vtime ordering is according to time_before64() which considers wrapping. A
5361  * numerically larger vtime may indicate an earlier position in the ordering and
5362  * vice-versa.
5363  *
5364  * A DSQ can only be used as a FIFO or priority queue at any given time and this
5365  * function must not be called on a DSQ which already has one or more FIFO tasks
5366  * queued and vice-versa. Also, the built-in DSQs (SCX_DSQ_LOCAL and
5367  * SCX_DSQ_GLOBAL) cannot be used as priority queues.
5368  */
scx_bpf_dsq_insert_vtime(struct task_struct * p,u64 dsq_id,u64 slice,u64 vtime,u64 enq_flags)5369 __bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id,
5370 					  u64 slice, u64 vtime, u64 enq_flags)
5371 {
5372 	struct scx_sched *sch;
5373 
5374 	guard(rcu)();
5375 	sch = rcu_dereference(scx_root);
5376 	if (unlikely(!sch))
5377 		return;
5378 
5379 	if (!scx_dsq_insert_preamble(sch, p, enq_flags))
5380 		return;
5381 
5382 	if (slice)
5383 		p->scx.slice = slice;
5384 	else
5385 		p->scx.slice = p->scx.slice ?: 1;
5386 
5387 	p->scx.dsq_vtime = vtime;
5388 
5389 	scx_dsq_insert_commit(sch, p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
5390 }
5391 
5392 __bpf_kfunc_end_defs();
5393 
5394 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
5395 BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU)
5396 BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU)
5397 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
5398 
5399 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
5400 	.owner			= THIS_MODULE,
5401 	.set			= &scx_kfunc_ids_enqueue_dispatch,
5402 };
5403 
scx_dsq_move(struct bpf_iter_scx_dsq_kern * kit,struct task_struct * p,u64 dsq_id,u64 enq_flags)5404 static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit,
5405 			 struct task_struct *p, u64 dsq_id, u64 enq_flags)
5406 {
5407 	struct scx_sched *sch = scx_root;
5408 	struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
5409 	struct rq *this_rq, *src_rq, *locked_rq;
5410 	bool dispatched = false;
5411 	bool in_balance;
5412 	unsigned long flags;
5413 
5414 	if (!scx_kf_allowed_if_unlocked() &&
5415 	    !scx_kf_allowed(sch, SCX_KF_DISPATCH))
5416 		return false;
5417 
5418 	/*
5419 	 * Can be called from either ops.dispatch() locking this_rq() or any
5420 	 * context where no rq lock is held. If latter, lock @p's task_rq which
5421 	 * we'll likely need anyway.
5422 	 */
5423 	src_rq = task_rq(p);
5424 
5425 	local_irq_save(flags);
5426 	this_rq = this_rq();
5427 	in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
5428 
5429 	if (in_balance) {
5430 		if (this_rq != src_rq) {
5431 			raw_spin_rq_unlock(this_rq);
5432 			raw_spin_rq_lock(src_rq);
5433 		}
5434 	} else {
5435 		raw_spin_rq_lock(src_rq);
5436 	}
5437 
5438 	/*
5439 	 * If the BPF scheduler keeps calling this function repeatedly, it can
5440 	 * cause similar live-lock conditions as consume_dispatch_q(). Insert a
5441 	 * breather if necessary.
5442 	 */
5443 	scx_breather(src_rq);
5444 
5445 	locked_rq = src_rq;
5446 	raw_spin_lock(&src_dsq->lock);
5447 
5448 	/*
5449 	 * Did someone else get to it? @p could have already left $src_dsq, got
5450 	 * re-enqueud, or be in the process of being consumed by someone else.
5451 	 */
5452 	if (unlikely(p->scx.dsq != src_dsq ||
5453 		     u32_before(kit->cursor.priv, p->scx.dsq_seq) ||
5454 		     p->scx.holding_cpu >= 0) ||
5455 	    WARN_ON_ONCE(src_rq != task_rq(p))) {
5456 		raw_spin_unlock(&src_dsq->lock);
5457 		goto out;
5458 	}
5459 
5460 	/* @p is still on $src_dsq and stable, determine the destination */
5461 	dst_dsq = find_dsq_for_dispatch(sch, this_rq, dsq_id, p);
5462 
5463 	/*
5464 	 * Apply vtime and slice updates before moving so that the new time is
5465 	 * visible before inserting into $dst_dsq. @p is still on $src_dsq but
5466 	 * this is safe as we're locking it.
5467 	 */
5468 	if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
5469 		p->scx.dsq_vtime = kit->vtime;
5470 	if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
5471 		p->scx.slice = kit->slice;
5472 
5473 	/* execute move */
5474 	locked_rq = move_task_between_dsqs(sch, p, enq_flags, src_dsq, dst_dsq);
5475 	dispatched = true;
5476 out:
5477 	if (in_balance) {
5478 		if (this_rq != locked_rq) {
5479 			raw_spin_rq_unlock(locked_rq);
5480 			raw_spin_rq_lock(this_rq);
5481 		}
5482 	} else {
5483 		raw_spin_rq_unlock_irqrestore(locked_rq, flags);
5484 	}
5485 
5486 	kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE |
5487 			       __SCX_DSQ_ITER_HAS_VTIME);
5488 	return dispatched;
5489 }
5490 
5491 __bpf_kfunc_start_defs();
5492 
5493 /**
5494  * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
5495  *
5496  * Can only be called from ops.dispatch().
5497  */
scx_bpf_dispatch_nr_slots(void)5498 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void)
5499 {
5500 	struct scx_sched *sch;
5501 
5502 	guard(rcu)();
5503 
5504 	sch = rcu_dereference(scx_root);
5505 	if (unlikely(!sch))
5506 		return 0;
5507 
5508 	if (!scx_kf_allowed(sch, SCX_KF_DISPATCH))
5509 		return 0;
5510 
5511 	return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
5512 }
5513 
5514 /**
5515  * scx_bpf_dispatch_cancel - Cancel the latest dispatch
5516  *
5517  * Cancel the latest dispatch. Can be called multiple times to cancel further
5518  * dispatches. Can only be called from ops.dispatch().
5519  */
scx_bpf_dispatch_cancel(void)5520 __bpf_kfunc void scx_bpf_dispatch_cancel(void)
5521 {
5522 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
5523 	struct scx_sched *sch;
5524 
5525 	guard(rcu)();
5526 
5527 	sch = rcu_dereference(scx_root);
5528 	if (unlikely(!sch))
5529 		return;
5530 
5531 	if (!scx_kf_allowed(sch, SCX_KF_DISPATCH))
5532 		return;
5533 
5534 	if (dspc->cursor > 0)
5535 		dspc->cursor--;
5536 	else
5537 		scx_error(sch, "dispatch buffer underflow");
5538 }
5539 
5540 /**
5541  * scx_bpf_dsq_move_to_local - move a task from a DSQ to the current CPU's local DSQ
5542  * @dsq_id: DSQ to move task from
5543  *
5544  * Move a task from the non-local DSQ identified by @dsq_id to the current CPU's
5545  * local DSQ for execution. Can only be called from ops.dispatch().
5546  *
5547  * This function flushes the in-flight dispatches from scx_bpf_dsq_insert()
5548  * before trying to move from the specified DSQ. It may also grab rq locks and
5549  * thus can't be called under any BPF locks.
5550  *
5551  * Returns %true if a task has been moved, %false if there isn't any task to
5552  * move.
5553  */
scx_bpf_dsq_move_to_local(u64 dsq_id)5554 __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id)
5555 {
5556 	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
5557 	struct scx_dispatch_q *dsq;
5558 	struct scx_sched *sch;
5559 
5560 	guard(rcu)();
5561 
5562 	sch = rcu_dereference(scx_root);
5563 	if (unlikely(!sch))
5564 		return false;
5565 
5566 	if (!scx_kf_allowed(sch, SCX_KF_DISPATCH))
5567 		return false;
5568 
5569 	flush_dispatch_buf(sch, dspc->rq);
5570 
5571 	dsq = find_user_dsq(sch, dsq_id);
5572 	if (unlikely(!dsq)) {
5573 		scx_error(sch, "invalid DSQ ID 0x%016llx", dsq_id);
5574 		return false;
5575 	}
5576 
5577 	if (consume_dispatch_q(sch, dspc->rq, dsq)) {
5578 		/*
5579 		 * A successfully consumed task can be dequeued before it starts
5580 		 * running while the CPU is trying to migrate other dispatched
5581 		 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
5582 		 * local DSQ.
5583 		 */
5584 		dspc->nr_tasks++;
5585 		return true;
5586 	} else {
5587 		return false;
5588 	}
5589 }
5590 
5591 /**
5592  * scx_bpf_dsq_move_set_slice - Override slice when moving between DSQs
5593  * @it__iter: DSQ iterator in progress
5594  * @slice: duration the moved task can run for in nsecs
5595  *
5596  * Override the slice of the next task that will be moved from @it__iter using
5597  * scx_bpf_dsq_move[_vtime](). If this function is not called, the previous
5598  * slice duration is kept.
5599  */
scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq * it__iter,u64 slice)5600 __bpf_kfunc void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter,
5601 					    u64 slice)
5602 {
5603 	struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
5604 
5605 	kit->slice = slice;
5606 	kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
5607 }
5608 
5609 /**
5610  * scx_bpf_dsq_move_set_vtime - Override vtime when moving between DSQs
5611  * @it__iter: DSQ iterator in progress
5612  * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
5613  *
5614  * Override the vtime of the next task that will be moved from @it__iter using
5615  * scx_bpf_dsq_move_vtime(). If this function is not called, the previous slice
5616  * vtime is kept. If scx_bpf_dsq_move() is used to dispatch the next task, the
5617  * override is ignored and cleared.
5618  */
scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq * it__iter,u64 vtime)5619 __bpf_kfunc void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter,
5620 					    u64 vtime)
5621 {
5622 	struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
5623 
5624 	kit->vtime = vtime;
5625 	kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
5626 }
5627 
5628 /**
5629  * scx_bpf_dsq_move - Move a task from DSQ iteration to a DSQ
5630  * @it__iter: DSQ iterator in progress
5631  * @p: task to transfer
5632  * @dsq_id: DSQ to move @p to
5633  * @enq_flags: SCX_ENQ_*
5634  *
5635  * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ
5636  * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can
5637  * be the destination.
5638  *
5639  * For the transfer to be successful, @p must still be on the DSQ and have been
5640  * queued before the DSQ iteration started. This function doesn't care whether
5641  * @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have
5642  * been queued before the iteration started.
5643  *
5644  * @p's slice is kept by default. Use scx_bpf_dsq_move_set_slice() to update.
5645  *
5646  * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
5647  * lock (e.g. BPF timers or SYSCALL programs).
5648  *
5649  * Returns %true if @p has been consumed, %false if @p had already been consumed
5650  * or dequeued.
5651  */
scx_bpf_dsq_move(struct bpf_iter_scx_dsq * it__iter,struct task_struct * p,u64 dsq_id,u64 enq_flags)5652 __bpf_kfunc bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter,
5653 				  struct task_struct *p, u64 dsq_id,
5654 				  u64 enq_flags)
5655 {
5656 	return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
5657 			    p, dsq_id, enq_flags);
5658 }
5659 
5660 /**
5661  * scx_bpf_dsq_move_vtime - Move a task from DSQ iteration to a PRIQ DSQ
5662  * @it__iter: DSQ iterator in progress
5663  * @p: task to transfer
5664  * @dsq_id: DSQ to move @p to
5665  * @enq_flags: SCX_ENQ_*
5666  *
5667  * Transfer @p which is on the DSQ currently iterated by @it__iter to the
5668  * priority queue of the DSQ specified by @dsq_id. The destination must be a
5669  * user DSQ as only user DSQs support priority queue.
5670  *
5671  * @p's slice and vtime are kept by default. Use scx_bpf_dsq_move_set_slice()
5672  * and scx_bpf_dsq_move_set_vtime() to update.
5673  *
5674  * All other aspects are identical to scx_bpf_dsq_move(). See
5675  * scx_bpf_dsq_insert_vtime() for more information on @vtime.
5676  */
scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq * it__iter,struct task_struct * p,u64 dsq_id,u64 enq_flags)5677 __bpf_kfunc bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter,
5678 					struct task_struct *p, u64 dsq_id,
5679 					u64 enq_flags)
5680 {
5681 	return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
5682 			    p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
5683 }
5684 
5685 __bpf_kfunc_end_defs();
5686 
5687 BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
5688 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
5689 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
5690 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local)
5691 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
5692 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
5693 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
5694 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
5695 BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
5696 
5697 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
5698 	.owner			= THIS_MODULE,
5699 	.set			= &scx_kfunc_ids_dispatch,
5700 };
5701 
5702 __bpf_kfunc_start_defs();
5703 
5704 /**
5705  * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
5706  *
5707  * Iterate over all of the tasks currently enqueued on the local DSQ of the
5708  * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
5709  * processed tasks. Can only be called from ops.cpu_release().
5710  */
scx_bpf_reenqueue_local(void)5711 __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
5712 {
5713 	struct scx_sched *sch;
5714 	LIST_HEAD(tasks);
5715 	u32 nr_enqueued = 0;
5716 	struct rq *rq;
5717 	struct task_struct *p, *n;
5718 
5719 	guard(rcu)();
5720 	sch = rcu_dereference(scx_root);
5721 	if (unlikely(!sch))
5722 		return 0;
5723 
5724 	if (!scx_kf_allowed(sch, SCX_KF_CPU_RELEASE))
5725 		return 0;
5726 
5727 	rq = cpu_rq(smp_processor_id());
5728 	lockdep_assert_rq_held(rq);
5729 
5730 	/*
5731 	 * The BPF scheduler may choose to dispatch tasks back to
5732 	 * @rq->scx.local_dsq. Move all candidate tasks off to a private list
5733 	 * first to avoid processing the same tasks repeatedly.
5734 	 */
5735 	list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
5736 				 scx.dsq_list.node) {
5737 		/*
5738 		 * If @p is being migrated, @p's current CPU may not agree with
5739 		 * its allowed CPUs and the migration_cpu_stop is about to
5740 		 * deactivate and re-activate @p anyway. Skip re-enqueueing.
5741 		 *
5742 		 * While racing sched property changes may also dequeue and
5743 		 * re-enqueue a migrating task while its current CPU and allowed
5744 		 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to
5745 		 * the current local DSQ for running tasks and thus are not
5746 		 * visible to the BPF scheduler.
5747 		 */
5748 		if (p->migration_pending)
5749 			continue;
5750 
5751 		dispatch_dequeue(rq, p);
5752 		list_add_tail(&p->scx.dsq_list.node, &tasks);
5753 	}
5754 
5755 	list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
5756 		list_del_init(&p->scx.dsq_list.node);
5757 		do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
5758 		nr_enqueued++;
5759 	}
5760 
5761 	return nr_enqueued;
5762 }
5763 
5764 __bpf_kfunc_end_defs();
5765 
5766 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
5767 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local)
5768 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
5769 
5770 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
5771 	.owner			= THIS_MODULE,
5772 	.set			= &scx_kfunc_ids_cpu_release,
5773 };
5774 
5775 __bpf_kfunc_start_defs();
5776 
5777 /**
5778  * scx_bpf_create_dsq - Create a custom DSQ
5779  * @dsq_id: DSQ to create
5780  * @node: NUMA node to allocate from
5781  *
5782  * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable
5783  * scx callback, and any BPF_PROG_TYPE_SYSCALL prog.
5784  */
scx_bpf_create_dsq(u64 dsq_id,s32 node)5785 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
5786 {
5787 	struct scx_dispatch_q *dsq;
5788 	struct scx_sched *sch;
5789 	s32 ret;
5790 
5791 	if (unlikely(node >= (int)nr_node_ids ||
5792 		     (node < 0 && node != NUMA_NO_NODE)))
5793 		return -EINVAL;
5794 
5795 	if (unlikely(dsq_id & SCX_DSQ_FLAG_BUILTIN))
5796 		return -EINVAL;
5797 
5798 	dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
5799 	if (!dsq)
5800 		return -ENOMEM;
5801 
5802 	init_dsq(dsq, dsq_id);
5803 
5804 	rcu_read_lock();
5805 
5806 	sch = rcu_dereference(scx_root);
5807 	if (sch)
5808 		ret = rhashtable_lookup_insert_fast(&sch->dsq_hash, &dsq->hash_node,
5809 						    dsq_hash_params);
5810 	else
5811 		ret = -ENODEV;
5812 
5813 	rcu_read_unlock();
5814 	if (ret)
5815 		kfree(dsq);
5816 	return ret;
5817 }
5818 
5819 __bpf_kfunc_end_defs();
5820 
5821 BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
5822 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
5823 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
5824 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
5825 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
5826 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
5827 BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
5828 
5829 static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = {
5830 	.owner			= THIS_MODULE,
5831 	.set			= &scx_kfunc_ids_unlocked,
5832 };
5833 
5834 __bpf_kfunc_start_defs();
5835 
scx_kick_cpu(struct scx_sched * sch,s32 cpu,u64 flags)5836 static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags)
5837 {
5838 	struct rq *this_rq;
5839 	unsigned long irq_flags;
5840 
5841 	if (!ops_cpu_valid(sch, cpu, NULL))
5842 		return;
5843 
5844 	local_irq_save(irq_flags);
5845 
5846 	this_rq = this_rq();
5847 
5848 	/*
5849 	 * While bypassing for PM ops, IRQ handling may not be online which can
5850 	 * lead to irq_work_queue() malfunction such as infinite busy wait for
5851 	 * IRQ status update. Suppress kicking.
5852 	 */
5853 	if (scx_rq_bypassing(this_rq))
5854 		goto out;
5855 
5856 	/*
5857 	 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
5858 	 * rq locks. We can probably be smarter and avoid bouncing if called
5859 	 * from ops which don't hold a rq lock.
5860 	 */
5861 	if (flags & SCX_KICK_IDLE) {
5862 		struct rq *target_rq = cpu_rq(cpu);
5863 
5864 		if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT)))
5865 			scx_error(sch, "PREEMPT/WAIT cannot be used with SCX_KICK_IDLE");
5866 
5867 		if (raw_spin_rq_trylock(target_rq)) {
5868 			if (can_skip_idle_kick(target_rq)) {
5869 				raw_spin_rq_unlock(target_rq);
5870 				goto out;
5871 			}
5872 			raw_spin_rq_unlock(target_rq);
5873 		}
5874 		cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
5875 	} else {
5876 		cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
5877 
5878 		if (flags & SCX_KICK_PREEMPT)
5879 			cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
5880 		if (flags & SCX_KICK_WAIT)
5881 			cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
5882 	}
5883 
5884 	irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
5885 out:
5886 	local_irq_restore(irq_flags);
5887 }
5888 
5889 /**
5890  * scx_bpf_kick_cpu - Trigger reschedule on a CPU
5891  * @cpu: cpu to kick
5892  * @flags: %SCX_KICK_* flags
5893  *
5894  * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
5895  * trigger rescheduling on a busy CPU. This can be called from any online
5896  * scx_ops operation and the actual kicking is performed asynchronously through
5897  * an irq work.
5898  */
scx_bpf_kick_cpu(s32 cpu,u64 flags)5899 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
5900 {
5901 	struct scx_sched *sch;
5902 
5903 	guard(rcu)();
5904 	sch = rcu_dereference(scx_root);
5905 	if (likely(sch))
5906 		scx_kick_cpu(sch, cpu, flags);
5907 }
5908 
5909 /**
5910  * scx_bpf_dsq_nr_queued - Return the number of queued tasks
5911  * @dsq_id: id of the DSQ
5912  *
5913  * Return the number of tasks in the DSQ matching @dsq_id. If not found,
5914  * -%ENOENT is returned.
5915  */
scx_bpf_dsq_nr_queued(u64 dsq_id)5916 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id)
5917 {
5918 	struct scx_sched *sch;
5919 	struct scx_dispatch_q *dsq;
5920 	s32 ret;
5921 
5922 	preempt_disable();
5923 
5924 	sch = rcu_dereference_sched(scx_root);
5925 	if (unlikely(!sch)) {
5926 		ret = -ENODEV;
5927 		goto out;
5928 	}
5929 
5930 	if (dsq_id == SCX_DSQ_LOCAL) {
5931 		ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
5932 		goto out;
5933 	} else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
5934 		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
5935 
5936 		if (ops_cpu_valid(sch, cpu, NULL)) {
5937 			ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
5938 			goto out;
5939 		}
5940 	} else {
5941 		dsq = find_user_dsq(sch, dsq_id);
5942 		if (dsq) {
5943 			ret = READ_ONCE(dsq->nr);
5944 			goto out;
5945 		}
5946 	}
5947 	ret = -ENOENT;
5948 out:
5949 	preempt_enable();
5950 	return ret;
5951 }
5952 
5953 /**
5954  * scx_bpf_destroy_dsq - Destroy a custom DSQ
5955  * @dsq_id: DSQ to destroy
5956  *
5957  * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
5958  * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
5959  * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
5960  * which doesn't exist. Can be called from any online scx_ops operations.
5961  */
scx_bpf_destroy_dsq(u64 dsq_id)5962 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id)
5963 {
5964 	struct scx_sched *sch;
5965 
5966 	rcu_read_lock();
5967 	sch = rcu_dereference(scx_root);
5968 	if (sch)
5969 		destroy_dsq(sch, dsq_id);
5970 	rcu_read_unlock();
5971 }
5972 
5973 /**
5974  * bpf_iter_scx_dsq_new - Create a DSQ iterator
5975  * @it: iterator to initialize
5976  * @dsq_id: DSQ to iterate
5977  * @flags: %SCX_DSQ_ITER_*
5978  *
5979  * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
5980  * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
5981  * tasks which are already queued when this function is invoked.
5982  */
bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq * it,u64 dsq_id,u64 flags)5983 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
5984 				     u64 flags)
5985 {
5986 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
5987 	struct scx_sched *sch;
5988 
5989 	BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
5990 		     sizeof(struct bpf_iter_scx_dsq));
5991 	BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
5992 		     __alignof__(struct bpf_iter_scx_dsq));
5993 
5994 	/*
5995 	 * next() and destroy() will be called regardless of the return value.
5996 	 * Always clear $kit->dsq.
5997 	 */
5998 	kit->dsq = NULL;
5999 
6000 	sch = rcu_dereference_check(scx_root, rcu_read_lock_bh_held());
6001 	if (unlikely(!sch))
6002 		return -ENODEV;
6003 
6004 	if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
6005 		return -EINVAL;
6006 
6007 	kit->dsq = find_user_dsq(sch, dsq_id);
6008 	if (!kit->dsq)
6009 		return -ENOENT;
6010 
6011 	INIT_LIST_HEAD(&kit->cursor.node);
6012 	kit->cursor.flags = SCX_DSQ_LNODE_ITER_CURSOR | flags;
6013 	kit->cursor.priv = READ_ONCE(kit->dsq->seq);
6014 
6015 	return 0;
6016 }
6017 
6018 /**
6019  * bpf_iter_scx_dsq_next - Progress a DSQ iterator
6020  * @it: iterator to progress
6021  *
6022  * Return the next task. See bpf_iter_scx_dsq_new().
6023  */
bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq * it)6024 __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
6025 {
6026 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6027 	bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV;
6028 	struct task_struct *p;
6029 	unsigned long flags;
6030 
6031 	if (!kit->dsq)
6032 		return NULL;
6033 
6034 	raw_spin_lock_irqsave(&kit->dsq->lock, flags);
6035 
6036 	if (list_empty(&kit->cursor.node))
6037 		p = NULL;
6038 	else
6039 		p = container_of(&kit->cursor, struct task_struct, scx.dsq_list);
6040 
6041 	/*
6042 	 * Only tasks which were queued before the iteration started are
6043 	 * visible. This bounds BPF iterations and guarantees that vtime never
6044 	 * jumps in the other direction while iterating.
6045 	 */
6046 	do {
6047 		p = nldsq_next_task(kit->dsq, p, rev);
6048 	} while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq)));
6049 
6050 	if (p) {
6051 		if (rev)
6052 			list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node);
6053 		else
6054 			list_move(&kit->cursor.node, &p->scx.dsq_list.node);
6055 	} else {
6056 		list_del_init(&kit->cursor.node);
6057 	}
6058 
6059 	raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
6060 
6061 	return p;
6062 }
6063 
6064 /**
6065  * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
6066  * @it: iterator to destroy
6067  *
6068  * Undo scx_iter_scx_dsq_new().
6069  */
bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq * it)6070 __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
6071 {
6072 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6073 
6074 	if (!kit->dsq)
6075 		return;
6076 
6077 	if (!list_empty(&kit->cursor.node)) {
6078 		unsigned long flags;
6079 
6080 		raw_spin_lock_irqsave(&kit->dsq->lock, flags);
6081 		list_del_init(&kit->cursor.node);
6082 		raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
6083 	}
6084 	kit->dsq = NULL;
6085 }
6086 
6087 __bpf_kfunc_end_defs();
6088 
__bstr_format(struct scx_sched * sch,u64 * data_buf,char * line_buf,size_t line_size,char * fmt,unsigned long long * data,u32 data__sz)6089 static s32 __bstr_format(struct scx_sched *sch, u64 *data_buf, char *line_buf,
6090 			 size_t line_size, char *fmt, unsigned long long *data,
6091 			 u32 data__sz)
6092 {
6093 	struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
6094 	s32 ret;
6095 
6096 	if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
6097 	    (data__sz && !data)) {
6098 		scx_error(sch, "invalid data=%p and data__sz=%u", (void *)data, data__sz);
6099 		return -EINVAL;
6100 	}
6101 
6102 	ret = copy_from_kernel_nofault(data_buf, data, data__sz);
6103 	if (ret < 0) {
6104 		scx_error(sch, "failed to read data fields (%d)", ret);
6105 		return ret;
6106 	}
6107 
6108 	ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8,
6109 				  &bprintf_data);
6110 	if (ret < 0) {
6111 		scx_error(sch, "format preparation failed (%d)", ret);
6112 		return ret;
6113 	}
6114 
6115 	ret = bstr_printf(line_buf, line_size, fmt,
6116 			  bprintf_data.bin_args);
6117 	bpf_bprintf_cleanup(&bprintf_data);
6118 	if (ret < 0) {
6119 		scx_error(sch, "(\"%s\", %p, %u) failed to format", fmt, data, data__sz);
6120 		return ret;
6121 	}
6122 
6123 	return ret;
6124 }
6125 
bstr_format(struct scx_sched * sch,struct scx_bstr_buf * buf,char * fmt,unsigned long long * data,u32 data__sz)6126 static s32 bstr_format(struct scx_sched *sch, struct scx_bstr_buf *buf,
6127 		       char *fmt, unsigned long long *data, u32 data__sz)
6128 {
6129 	return __bstr_format(sch, buf->data, buf->line, sizeof(buf->line),
6130 			     fmt, data, data__sz);
6131 }
6132 
6133 __bpf_kfunc_start_defs();
6134 
6135 /**
6136  * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
6137  * @exit_code: Exit value to pass to user space via struct scx_exit_info.
6138  * @fmt: error message format string
6139  * @data: format string parameters packaged using ___bpf_fill() macro
6140  * @data__sz: @data len, must end in '__sz' for the verifier
6141  *
6142  * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops
6143  * disabling.
6144  */
scx_bpf_exit_bstr(s64 exit_code,char * fmt,unsigned long long * data,u32 data__sz)6145 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
6146 				   unsigned long long *data, u32 data__sz)
6147 {
6148 	struct scx_sched *sch;
6149 	unsigned long flags;
6150 
6151 	raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
6152 	sch = rcu_dereference_bh(scx_root);
6153 	if (likely(sch) &&
6154 	    bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
6155 		scx_exit(sch, SCX_EXIT_UNREG_BPF, exit_code, "%s", scx_exit_bstr_buf.line);
6156 	raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
6157 }
6158 
6159 /**
6160  * scx_bpf_error_bstr - Indicate fatal error
6161  * @fmt: error message format string
6162  * @data: format string parameters packaged using ___bpf_fill() macro
6163  * @data__sz: @data len, must end in '__sz' for the verifier
6164  *
6165  * Indicate that the BPF scheduler encountered a fatal error and initiate ops
6166  * disabling.
6167  */
scx_bpf_error_bstr(char * fmt,unsigned long long * data,u32 data__sz)6168 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
6169 				    u32 data__sz)
6170 {
6171 	struct scx_sched *sch;
6172 	unsigned long flags;
6173 
6174 	raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
6175 	sch = rcu_dereference_bh(scx_root);
6176 	if (likely(sch) &&
6177 	    bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
6178 		scx_exit(sch, SCX_EXIT_ERROR_BPF, 0, "%s", scx_exit_bstr_buf.line);
6179 	raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
6180 }
6181 
6182 /**
6183  * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler
6184  * @fmt: format string
6185  * @data: format string parameters packaged using ___bpf_fill() macro
6186  * @data__sz: @data len, must end in '__sz' for the verifier
6187  *
6188  * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and
6189  * dump_task() to generate extra debug dump specific to the BPF scheduler.
6190  *
6191  * The extra dump may be multiple lines. A single line may be split over
6192  * multiple calls. The last line is automatically terminated.
6193  */
scx_bpf_dump_bstr(char * fmt,unsigned long long * data,u32 data__sz)6194 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data,
6195 				   u32 data__sz)
6196 {
6197 	struct scx_sched *sch;
6198 	struct scx_dump_data *dd = &scx_dump_data;
6199 	struct scx_bstr_buf *buf = &dd->buf;
6200 	s32 ret;
6201 
6202 	guard(rcu)();
6203 
6204 	sch = rcu_dereference(scx_root);
6205 	if (unlikely(!sch))
6206 		return;
6207 
6208 	if (raw_smp_processor_id() != dd->cpu) {
6209 		scx_error(sch, "scx_bpf_dump() must only be called from ops.dump() and friends");
6210 		return;
6211 	}
6212 
6213 	/* append the formatted string to the line buf */
6214 	ret = __bstr_format(sch, buf->data, buf->line + dd->cursor,
6215 			    sizeof(buf->line) - dd->cursor, fmt, data, data__sz);
6216 	if (ret < 0) {
6217 		dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)",
6218 			  dd->prefix, fmt, data, data__sz, ret);
6219 		return;
6220 	}
6221 
6222 	dd->cursor += ret;
6223 	dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line));
6224 
6225 	if (!dd->cursor)
6226 		return;
6227 
6228 	/*
6229 	 * If the line buf overflowed or ends in a newline, flush it into the
6230 	 * dump. This is to allow the caller to generate a single line over
6231 	 * multiple calls. As ops_dump_flush() can also handle multiple lines in
6232 	 * the line buf, the only case which can lead to an unexpected
6233 	 * truncation is when the caller keeps generating newlines in the middle
6234 	 * instead of the end consecutively. Don't do that.
6235 	 */
6236 	if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n')
6237 		ops_dump_flush();
6238 }
6239 
6240 /**
6241  * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU
6242  * @cpu: CPU of interest
6243  *
6244  * Return the maximum relative capacity of @cpu in relation to the most
6245  * performant CPU in the system. The return value is in the range [1,
6246  * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur().
6247  */
scx_bpf_cpuperf_cap(s32 cpu)6248 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu)
6249 {
6250 	struct scx_sched *sch;
6251 
6252 	guard(rcu)();
6253 
6254 	sch = rcu_dereference(scx_root);
6255 	if (likely(sch) && ops_cpu_valid(sch, cpu, NULL))
6256 		return arch_scale_cpu_capacity(cpu);
6257 	else
6258 		return SCX_CPUPERF_ONE;
6259 }
6260 
6261 /**
6262  * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU
6263  * @cpu: CPU of interest
6264  *
6265  * Return the current relative performance of @cpu in relation to its maximum.
6266  * The return value is in the range [1, %SCX_CPUPERF_ONE].
6267  *
6268  * The current performance level of a CPU in relation to the maximum performance
6269  * available in the system can be calculated as follows:
6270  *
6271  *   scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE
6272  *
6273  * The result is in the range [1, %SCX_CPUPERF_ONE].
6274  */
scx_bpf_cpuperf_cur(s32 cpu)6275 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu)
6276 {
6277 	struct scx_sched *sch;
6278 
6279 	guard(rcu)();
6280 
6281 	sch = rcu_dereference(scx_root);
6282 	if (likely(sch) && ops_cpu_valid(sch, cpu, NULL))
6283 		return arch_scale_freq_capacity(cpu);
6284 	else
6285 		return SCX_CPUPERF_ONE;
6286 }
6287 
6288 /**
6289  * scx_bpf_cpuperf_set - Set the relative performance target of a CPU
6290  * @cpu: CPU of interest
6291  * @perf: target performance level [0, %SCX_CPUPERF_ONE]
6292  *
6293  * Set the target performance level of @cpu to @perf. @perf is in linear
6294  * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
6295  * schedutil cpufreq governor chooses the target frequency.
6296  *
6297  * The actual performance level chosen, CPU grouping, and the overhead and
6298  * latency of the operations are dependent on the hardware and cpufreq driver in
6299  * use. Consult hardware and cpufreq documentation for more information. The
6300  * current performance level can be monitored using scx_bpf_cpuperf_cur().
6301  */
scx_bpf_cpuperf_set(s32 cpu,u32 perf)6302 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
6303 {
6304 	struct scx_sched *sch;
6305 
6306 	guard(rcu)();
6307 
6308 	sch = rcu_dereference(sch);
6309 	if (unlikely(!sch))
6310 		return;
6311 
6312 	if (unlikely(perf > SCX_CPUPERF_ONE)) {
6313 		scx_error(sch, "Invalid cpuperf target %u for CPU %d", perf, cpu);
6314 		return;
6315 	}
6316 
6317 	if (ops_cpu_valid(sch, cpu, NULL)) {
6318 		struct rq *rq = cpu_rq(cpu), *locked_rq = scx_locked_rq();
6319 		struct rq_flags rf;
6320 
6321 		/*
6322 		 * When called with an rq lock held, restrict the operation
6323 		 * to the corresponding CPU to prevent ABBA deadlocks.
6324 		 */
6325 		if (locked_rq && rq != locked_rq) {
6326 			scx_error(sch, "Invalid target CPU %d", cpu);
6327 			return;
6328 		}
6329 
6330 		/*
6331 		 * If no rq lock is held, allow to operate on any CPU by
6332 		 * acquiring the corresponding rq lock.
6333 		 */
6334 		if (!locked_rq) {
6335 			rq_lock_irqsave(rq, &rf);
6336 			update_rq_clock(rq);
6337 		}
6338 
6339 		rq->scx.cpuperf_target = perf;
6340 		cpufreq_update_util(rq, 0);
6341 
6342 		if (!locked_rq)
6343 			rq_unlock_irqrestore(rq, &rf);
6344 	}
6345 }
6346 
6347 /**
6348  * scx_bpf_nr_node_ids - Return the number of possible node IDs
6349  *
6350  * All valid node IDs in the system are smaller than the returned value.
6351  */
scx_bpf_nr_node_ids(void)6352 __bpf_kfunc u32 scx_bpf_nr_node_ids(void)
6353 {
6354 	return nr_node_ids;
6355 }
6356 
6357 /**
6358  * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
6359  *
6360  * All valid CPU IDs in the system are smaller than the returned value.
6361  */
scx_bpf_nr_cpu_ids(void)6362 __bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
6363 {
6364 	return nr_cpu_ids;
6365 }
6366 
6367 /**
6368  * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
6369  */
scx_bpf_get_possible_cpumask(void)6370 __bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
6371 {
6372 	return cpu_possible_mask;
6373 }
6374 
6375 /**
6376  * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
6377  */
scx_bpf_get_online_cpumask(void)6378 __bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
6379 {
6380 	return cpu_online_mask;
6381 }
6382 
6383 /**
6384  * scx_bpf_put_cpumask - Release a possible/online cpumask
6385  * @cpumask: cpumask to release
6386  */
scx_bpf_put_cpumask(const struct cpumask * cpumask)6387 __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
6388 {
6389 	/*
6390 	 * Empty function body because we aren't actually acquiring or releasing
6391 	 * a reference to a global cpumask, which is read-only in the caller and
6392 	 * is never released. The acquire / release semantics here are just used
6393 	 * to make the cpumask is a trusted pointer in the caller.
6394 	 */
6395 }
6396 
6397 /**
6398  * scx_bpf_task_running - Is task currently running?
6399  * @p: task of interest
6400  */
scx_bpf_task_running(const struct task_struct * p)6401 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
6402 {
6403 	return task_rq(p)->curr == p;
6404 }
6405 
6406 /**
6407  * scx_bpf_task_cpu - CPU a task is currently associated with
6408  * @p: task of interest
6409  */
scx_bpf_task_cpu(const struct task_struct * p)6410 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
6411 {
6412 	return task_cpu(p);
6413 }
6414 
6415 /**
6416  * scx_bpf_cpu_rq - Fetch the rq of a CPU
6417  * @cpu: CPU of the rq
6418  */
scx_bpf_cpu_rq(s32 cpu)6419 __bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
6420 {
6421 	struct scx_sched *sch;
6422 
6423 	guard(rcu)();
6424 
6425 	sch = rcu_dereference(scx_root);
6426 	if (unlikely(!sch))
6427 		return NULL;
6428 
6429 	if (!ops_cpu_valid(sch, cpu, NULL))
6430 		return NULL;
6431 
6432 	if (!sch->warned_deprecated_rq) {
6433 		printk_deferred(KERN_WARNING "sched_ext: %s() is deprecated; "
6434 				"use scx_bpf_locked_rq() when holding rq lock "
6435 				"or scx_bpf_cpu_curr() to read remote curr safely.\n", __func__);
6436 		sch->warned_deprecated_rq = true;
6437 	}
6438 
6439 	return cpu_rq(cpu);
6440 }
6441 
6442 /**
6443  * scx_bpf_locked_rq - Return the rq currently locked by SCX
6444  *
6445  * Returns the rq if a rq lock is currently held by SCX.
6446  * Otherwise emits an error and returns NULL.
6447  */
scx_bpf_locked_rq(void)6448 __bpf_kfunc struct rq *scx_bpf_locked_rq(void)
6449 {
6450 	struct scx_sched *sch;
6451 	struct rq *rq;
6452 
6453 	guard(preempt)();
6454 
6455 	sch = rcu_dereference_sched(scx_root);
6456 	if (unlikely(!sch))
6457 		return NULL;
6458 
6459 	rq = scx_locked_rq();
6460 	if (!rq) {
6461 		scx_error(sch, "accessing rq without holding rq lock");
6462 		return NULL;
6463 	}
6464 
6465 	return rq;
6466 }
6467 
6468 /**
6469  * scx_bpf_cpu_curr - Return remote CPU's curr task
6470  * @cpu: CPU of interest
6471  *
6472  * Callers must hold RCU read lock (KF_RCU).
6473  */
scx_bpf_cpu_curr(s32 cpu)6474 __bpf_kfunc struct task_struct *scx_bpf_cpu_curr(s32 cpu)
6475 {
6476 	struct scx_sched *sch;
6477 
6478 	guard(rcu)();
6479 
6480 	sch = rcu_dereference(scx_root);
6481 	if (unlikely(!sch))
6482 		return NULL;
6483 
6484 	if (!ops_cpu_valid(sch, cpu, NULL))
6485 		return NULL;
6486 
6487 	return rcu_dereference(cpu_rq(cpu)->curr);
6488 }
6489 
6490 /**
6491  * scx_bpf_task_cgroup - Return the sched cgroup of a task
6492  * @p: task of interest
6493  *
6494  * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
6495  * from the scheduler's POV. SCX operations should use this function to
6496  * determine @p's current cgroup as, unlike following @p->cgroups,
6497  * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all
6498  * rq-locked operations. Can be called on the parameter tasks of rq-locked
6499  * operations. The restriction guarantees that @p's rq is locked by the caller.
6500  */
6501 #ifdef CONFIG_CGROUP_SCHED
scx_bpf_task_cgroup(struct task_struct * p)6502 __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
6503 {
6504 	struct task_group *tg = p->sched_task_group;
6505 	struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
6506 	struct scx_sched *sch;
6507 
6508 	guard(rcu)();
6509 
6510 	sch = rcu_dereference(scx_root);
6511 	if (unlikely(!sch))
6512 		goto out;
6513 
6514 	if (!scx_kf_allowed_on_arg_tasks(sch, __SCX_KF_RQ_LOCKED, p))
6515 		goto out;
6516 
6517 	cgrp = tg_cgrp(tg);
6518 
6519 out:
6520 	cgroup_get(cgrp);
6521 	return cgrp;
6522 }
6523 #endif
6524 
6525 /**
6526  * scx_bpf_now - Returns a high-performance monotonically non-decreasing
6527  * clock for the current CPU. The clock returned is in nanoseconds.
6528  *
6529  * It provides the following properties:
6530  *
6531  * 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently
6532  *  to account for execution time and track tasks' runtime properties.
6533  *  Unfortunately, in some hardware platforms, bpf_ktime_get_ns() -- which
6534  *  eventually reads a hardware timestamp counter -- is neither performant nor
6535  *  scalable. scx_bpf_now() aims to provide a high-performance clock by
6536  *  using the rq clock in the scheduler core whenever possible.
6537  *
6538  * 2) High enough resolution for the BPF scheduler use cases: In most BPF
6539  *  scheduler use cases, the required clock resolution is lower than the most
6540  *  accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically
6541  *  uses the rq clock in the scheduler core whenever it is valid. It considers
6542  *  that the rq clock is valid from the time the rq clock is updated
6543  *  (update_rq_clock) until the rq is unlocked (rq_unpin_lock).
6544  *
6545  * 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now()
6546  *  guarantees the clock never goes backward when comparing them in the same
6547  *  CPU. On the other hand, when comparing clocks in different CPUs, there
6548  *  is no such guarantee -- the clock can go backward. It provides a
6549  *  monotonically *non-decreasing* clock so that it would provide the same
6550  *  clock values in two different scx_bpf_now() calls in the same CPU
6551  *  during the same period of when the rq clock is valid.
6552  */
scx_bpf_now(void)6553 __bpf_kfunc u64 scx_bpf_now(void)
6554 {
6555 	struct rq *rq;
6556 	u64 clock;
6557 
6558 	preempt_disable();
6559 
6560 	rq = this_rq();
6561 	if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) {
6562 		/*
6563 		 * If the rq clock is valid, use the cached rq clock.
6564 		 *
6565 		 * Note that scx_bpf_now() is re-entrant between a process
6566 		 * context and an interrupt context (e.g., timer interrupt).
6567 		 * However, we don't need to consider the race between them
6568 		 * because such race is not observable from a caller.
6569 		 */
6570 		clock = READ_ONCE(rq->scx.clock);
6571 	} else {
6572 		/*
6573 		 * Otherwise, return a fresh rq clock.
6574 		 *
6575 		 * The rq clock is updated outside of the rq lock.
6576 		 * In this case, keep the updated rq clock invalid so the next
6577 		 * kfunc call outside the rq lock gets a fresh rq clock.
6578 		 */
6579 		clock = sched_clock_cpu(cpu_of(rq));
6580 	}
6581 
6582 	preempt_enable();
6583 
6584 	return clock;
6585 }
6586 
scx_read_events(struct scx_sched * sch,struct scx_event_stats * events)6587 static void scx_read_events(struct scx_sched *sch, struct scx_event_stats *events)
6588 {
6589 	struct scx_event_stats *e_cpu;
6590 	int cpu;
6591 
6592 	/* Aggregate per-CPU event counters into @events. */
6593 	memset(events, 0, sizeof(*events));
6594 	for_each_possible_cpu(cpu) {
6595 		e_cpu = &per_cpu_ptr(sch->pcpu, cpu)->event_stats;
6596 		scx_agg_event(events, e_cpu, SCX_EV_SELECT_CPU_FALLBACK);
6597 		scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
6598 		scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_KEEP_LAST);
6599 		scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_EXITING);
6600 		scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
6601 		scx_agg_event(events, e_cpu, SCX_EV_REFILL_SLICE_DFL);
6602 		scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DURATION);
6603 		scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DISPATCH);
6604 		scx_agg_event(events, e_cpu, SCX_EV_BYPASS_ACTIVATE);
6605 	}
6606 }
6607 
6608 /*
6609  * scx_bpf_events - Get a system-wide event counter to
6610  * @events: output buffer from a BPF program
6611  * @events__sz: @events len, must end in '__sz'' for the verifier
6612  */
scx_bpf_events(struct scx_event_stats * events,size_t events__sz)6613 __bpf_kfunc void scx_bpf_events(struct scx_event_stats *events,
6614 				size_t events__sz)
6615 {
6616 	struct scx_sched *sch;
6617 	struct scx_event_stats e_sys;
6618 
6619 	rcu_read_lock();
6620 	sch = rcu_dereference(scx_root);
6621 	if (sch)
6622 		scx_read_events(sch, &e_sys);
6623 	else
6624 		memset(&e_sys, 0, sizeof(e_sys));
6625 	rcu_read_unlock();
6626 
6627 	/*
6628 	 * We cannot entirely trust a BPF-provided size since a BPF program
6629 	 * might be compiled against a different vmlinux.h, of which
6630 	 * scx_event_stats would be larger (a newer vmlinux.h) or smaller
6631 	 * (an older vmlinux.h). Hence, we use the smaller size to avoid
6632 	 * memory corruption.
6633 	 */
6634 	events__sz = min(events__sz, sizeof(*events));
6635 	memcpy(events, &e_sys, events__sz);
6636 }
6637 
6638 __bpf_kfunc_end_defs();
6639 
6640 BTF_KFUNCS_START(scx_kfunc_ids_any)
6641 BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
6642 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
6643 BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
6644 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
6645 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
6646 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
6647 BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
6648 BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
6649 BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS)
6650 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
6651 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
6652 BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
6653 BTF_ID_FLAGS(func, scx_bpf_nr_node_ids)
6654 BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
6655 BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
6656 BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
6657 BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
6658 BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
6659 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
6660 BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
6661 BTF_ID_FLAGS(func, scx_bpf_locked_rq, KF_RET_NULL)
6662 BTF_ID_FLAGS(func, scx_bpf_cpu_curr, KF_RET_NULL | KF_RCU_PROTECTED)
6663 #ifdef CONFIG_CGROUP_SCHED
6664 BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
6665 #endif
6666 BTF_ID_FLAGS(func, scx_bpf_now)
6667 BTF_ID_FLAGS(func, scx_bpf_events, KF_TRUSTED_ARGS)
6668 BTF_KFUNCS_END(scx_kfunc_ids_any)
6669 
6670 static const struct btf_kfunc_id_set scx_kfunc_set_any = {
6671 	.owner			= THIS_MODULE,
6672 	.set			= &scx_kfunc_ids_any,
6673 };
6674 
scx_init(void)6675 static int __init scx_init(void)
6676 {
6677 	int ret;
6678 
6679 	/*
6680 	 * kfunc registration can't be done from init_sched_ext_class() as
6681 	 * register_btf_kfunc_id_set() needs most of the system to be up.
6682 	 *
6683 	 * Some kfuncs are context-sensitive and can only be called from
6684 	 * specific SCX ops. They are grouped into BTF sets accordingly.
6685 	 * Unfortunately, BPF currently doesn't have a way of enforcing such
6686 	 * restrictions. Eventually, the verifier should be able to enforce
6687 	 * them. For now, register them the same and make each kfunc explicitly
6688 	 * check using scx_kf_allowed().
6689 	 */
6690 	if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
6691 					     &scx_kfunc_set_enqueue_dispatch)) ||
6692 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
6693 					     &scx_kfunc_set_dispatch)) ||
6694 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
6695 					     &scx_kfunc_set_cpu_release)) ||
6696 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
6697 					     &scx_kfunc_set_unlocked)) ||
6698 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
6699 					     &scx_kfunc_set_unlocked)) ||
6700 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
6701 					     &scx_kfunc_set_any)) ||
6702 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
6703 					     &scx_kfunc_set_any)) ||
6704 	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
6705 					     &scx_kfunc_set_any))) {
6706 		pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
6707 		return ret;
6708 	}
6709 
6710 	ret = scx_idle_init();
6711 	if (ret) {
6712 		pr_err("sched_ext: Failed to initialize idle tracking (%d)\n", ret);
6713 		return ret;
6714 	}
6715 
6716 	ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
6717 	if (ret) {
6718 		pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
6719 		return ret;
6720 	}
6721 
6722 	ret = register_pm_notifier(&scx_pm_notifier);
6723 	if (ret) {
6724 		pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
6725 		return ret;
6726 	}
6727 
6728 	scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
6729 	if (!scx_kset) {
6730 		pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
6731 		return -ENOMEM;
6732 	}
6733 
6734 	ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
6735 	if (ret < 0) {
6736 		pr_err("sched_ext: Failed to add global attributes\n");
6737 		return ret;
6738 	}
6739 
6740 	return 0;
6741 }
6742 __initcall(scx_init);
6743