1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4 *
5 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
6 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
7 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
8 */
9 #include <linux/btf_ids.h>
10 #include "ext_idle.h"
11
12 static DEFINE_RAW_SPINLOCK(scx_sched_lock);
13
14 /*
15 * NOTE: sched_ext is in the process of growing multiple scheduler support and
16 * scx_root usage is in a transitional state. Naked dereferences are safe if the
17  * caller is one of the tasks attached to SCX; otherwise, an explicit RCU
18  * dereference is necessary. Naked scx_root dereferences trigger sparse warnings but
19 * are used as temporary markers to indicate that the dereferences need to be
20 * updated to point to the associated scheduler instances rather than scx_root.
21 */
22 struct scx_sched __rcu *scx_root;
23
24 /*
25  * List of all scheds. Writers must hold both scx_enable_mutex and scx_sched_lock.
26 * Readers can hold either or rcu_read_lock().
27 */
28 static LIST_HEAD(scx_sched_all);
29
30 #ifdef CONFIG_EXT_SUB_SCHED
31 static const struct rhashtable_params scx_sched_hash_params = {
32 .key_len = sizeof_field(struct scx_sched, ops.sub_cgroup_id),
33 .key_offset = offsetof(struct scx_sched, ops.sub_cgroup_id),
34 .head_offset = offsetof(struct scx_sched, hash_node),
35 .insecure_elasticity = true, /* inserted under scx_sched_lock */
36 };
37
38 static struct rhashtable scx_sched_hash;
39 #endif
40
41 /*
42 * During exit, a task may schedule after losing its PIDs. When disabling the
43 * BPF scheduler, we need to be able to iterate tasks in every state to
44 * guarantee system safety. Maintain a dedicated task list which contains every
45 * task between its fork and eventual free.
46 */
47 static DEFINE_RAW_SPINLOCK(scx_tasks_lock);
48 static LIST_HEAD(scx_tasks);
49
50 /* ops enable/disable */
51 static DEFINE_MUTEX(scx_enable_mutex);
52 DEFINE_STATIC_KEY_FALSE(__scx_enabled);
53 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
54 static atomic_t scx_enable_state_var = ATOMIC_INIT(SCX_DISABLED);
55 static DEFINE_RAW_SPINLOCK(scx_bypass_lock);
56 static bool scx_init_task_enabled;
57 static bool scx_switching_all;
58 DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
59
60 static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
61 static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
62
63 #ifdef CONFIG_EXT_SUB_SCHED
64 /*
65 * The sub sched being enabled. Used by scx_disable_and_exit_task() to exit
66 * tasks for the sub-sched being enabled. Use a global variable instead of a
67 * per-task field as all enables are serialized.
68 */
69 static struct scx_sched *scx_enabling_sub_sched;
70 #else
71 #define scx_enabling_sub_sched (struct scx_sched *)NULL
72 #endif /* CONFIG_EXT_SUB_SCHED */
73
74 /*
75 * A monotonically increasing sequence number that is incremented every time a
76 * scheduler is enabled. This can be used to check if any custom sched_ext
77 * scheduler has ever been used in the system.
78 */
79 static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);
80
81 /*
82 * Watchdog interval. All scx_sched's share a single watchdog timer and the
83 * interval is half of the shortest sch->watchdog_timeout.
84 */
85 static unsigned long scx_watchdog_interval;
86
87 /*
88 * The last time the delayed work was run. This delayed work relies on
89 * ksoftirqd being able to run to service timer interrupts, so it's possible
90 * that this work itself could get wedged. To account for this, we check that
91 * it's not stalled in the timer tick, and trigger an error if it is.
92 */
93 static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
94
95 static struct delayed_work scx_watchdog_work;
96
97 /*
98 * For %SCX_KICK_WAIT: Each CPU has a pointer to an array of kick_sync sequence
99 * numbers. The arrays are allocated with kvzalloc() as size can exceed percpu
100 * allocator limits on large machines. O(nr_cpu_ids^2) allocation, allocated
101 * lazily when enabling and freed when disabling to avoid waste when sched_ext
102 * isn't active.
103 */
104 struct scx_kick_syncs {
105 struct rcu_head rcu;
106 unsigned long syncs[];
107 };
108
109 static DEFINE_PER_CPU(struct scx_kick_syncs __rcu *, scx_kick_syncs);
110
111 /*
112 * Direct dispatch marker.
113 *
114 * Non-NULL values are used for direct dispatch from enqueue path. A valid
115 * pointer points to the task currently being enqueued. An ERR_PTR value is used
116 * to indicate that direct dispatch has already happened.
117 */
118 static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
119
120 static const struct rhashtable_params dsq_hash_params = {
121 .key_len = sizeof_field(struct scx_dispatch_q, id),
122 .key_offset = offsetof(struct scx_dispatch_q, id),
123 .head_offset = offsetof(struct scx_dispatch_q, hash_node),
124 };
125
126 static LLIST_HEAD(dsqs_to_free);
127
128 /* string formatting from BPF */
129 struct scx_bstr_buf {
130 u64 data[MAX_BPRINTF_VARARGS];
131 char line[SCX_EXIT_MSG_LEN];
132 };
133
134 static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
135 static struct scx_bstr_buf scx_exit_bstr_buf;
136
137 /* ops debug dump */
138 static DEFINE_RAW_SPINLOCK(scx_dump_lock);
139
140 struct scx_dump_data {
141 s32 cpu;
142 bool first;
143 s32 cursor;
144 struct seq_buf *s;
145 const char *prefix;
146 struct scx_bstr_buf buf;
147 };
148
149 static struct scx_dump_data scx_dump_data = {
150 .cpu = -1,
151 };
152
153 /* /sys/kernel/sched_ext interface */
154 static struct kset *scx_kset;
155
156 /*
157 * Parameters that can be adjusted through /sys/module/sched_ext/parameters.
158 * There usually is no reason to modify these as normal scheduler operation
159 * shouldn't be affected by them. The knobs are primarily for debugging.
160 */
161 static unsigned int scx_slice_bypass_us = SCX_SLICE_BYPASS / NSEC_PER_USEC;
162 static unsigned int scx_bypass_lb_intv_us = SCX_BYPASS_LB_DFL_INTV_US;
163
164 static int set_slice_us(const char *val, const struct kernel_param *kp)
165 {
166 return param_set_uint_minmax(val, kp, 100, 100 * USEC_PER_MSEC);
167 }
168
169 static const struct kernel_param_ops slice_us_param_ops = {
170 .set = set_slice_us,
171 .get = param_get_uint,
172 };
173
174 static int set_bypass_lb_intv_us(const char *val, const struct kernel_param *kp)
175 {
176 return param_set_uint_minmax(val, kp, 0, 10 * USEC_PER_SEC);
177 }
178
179 static const struct kernel_param_ops bypass_lb_intv_us_param_ops = {
180 .set = set_bypass_lb_intv_us,
181 .get = param_get_uint,
182 };
183
184 #undef MODULE_PARAM_PREFIX
185 #define MODULE_PARAM_PREFIX "sched_ext."
186
187 module_param_cb(slice_bypass_us, &slice_us_param_ops, &scx_slice_bypass_us, 0600);
188 MODULE_PARM_DESC(slice_bypass_us, "bypass slice in microseconds, applied on [un]load (100us to 100ms)");
189 module_param_cb(bypass_lb_intv_us, &bypass_lb_intv_us_param_ops, &scx_bypass_lb_intv_us, 0600);
190 MODULE_PARM_DESC(bypass_lb_intv_us, "bypass load balance interval in microseconds (0 (disable) to 10s)");
191
192 #undef MODULE_PARAM_PREFIX
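
/*
 * Illustrative usage sketch (not part of the build): with the
 * MODULE_PARAM_PREFIX above, both knobs show up under
 * /sys/module/sched_ext/parameters/ and can be adjusted at runtime within the
 * ranges enforced by the setters, e.g.:
 *
 *	echo 5000 > /sys/module/sched_ext/parameters/slice_bypass_us
 *	echo 0 > /sys/module/sched_ext/parameters/bypass_lb_intv_us
 *
 * slice_bypass_us accepts 100 to 100000 (us) and bypass_lb_intv_us accepts 0
 * to 10000000 (us); values outside these ranges are rejected by
 * param_set_uint_minmax().
 */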
193
194 #define CREATE_TRACE_POINTS
195 #include <trace/events/sched_ext.h>
196
197 static void run_deferred(struct rq *rq);
198 static bool task_dead_and_done(struct task_struct *p);
199 static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags);
200 static void scx_disable(struct scx_sched *sch, enum scx_exit_kind kind);
201 static bool scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind,
202 s64 exit_code, const char *fmt, va_list args);
203
204 static __printf(4, 5) bool scx_exit(struct scx_sched *sch,
205 enum scx_exit_kind kind, s64 exit_code,
206 const char *fmt, ...)
207 {
208 va_list args;
209 bool ret;
210
211 va_start(args, fmt);
212 ret = scx_vexit(sch, kind, exit_code, fmt, args);
213 va_end(args);
214
215 return ret;
216 }
217
218 #define scx_error(sch, fmt, args...) scx_exit((sch), SCX_EXIT_ERROR, 0, fmt, ##args)
219 #define scx_verror(sch, fmt, args) scx_vexit((sch), SCX_EXIT_ERROR, 0, fmt, args)
220
221 #define SCX_HAS_OP(sch, op) test_bit(SCX_OP_IDX(op), (sch)->has_op)
222
223 static long jiffies_delta_msecs(unsigned long at, unsigned long now)
224 {
225 if (time_after(at, now))
226 return jiffies_to_msecs(at - now);
227 else
228 return -(long)jiffies_to_msecs(now - at);
229 }
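
/*
 * Worked example, assuming CONFIG_HZ=1000 so one jiffy is 1ms: if @at is
 * now + 250, the deadline is still 250ms away and 250 is returned; if @at is
 * now - 250, it passed 250ms ago and -250 is returned.
 */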
230
231 static bool u32_before(u32 a, u32 b)
232 {
233 return (s32)(a - b) < 0;
234 }
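
/*
 * Worked example: the cast keeps wrap-around comparisons of sequence numbers
 * sane. u32_before(0xfffffffe, 5) is true because (s32)(0xfffffffe - 5) is
 * negative, while u32_before(5, 0xfffffffe) is false; after wrapping, 5 is the
 * newer sequence number.
 */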
235
236 #ifdef CONFIG_EXT_SUB_SCHED
237 /**
238 * scx_parent - Find the parent sched
239 * @sch: sched to find the parent of
240 *
241 * Returns the parent scheduler or %NULL if @sch is root.
242 */
243 static struct scx_sched *scx_parent(struct scx_sched *sch)
244 {
245 if (sch->level)
246 return sch->ancestors[sch->level - 1];
247 else
248 return NULL;
249 }
250
251 /**
252 * scx_next_descendant_pre - find the next descendant for pre-order walk
253 * @pos: the current position (%NULL to initiate traversal)
254 * @root: sched whose descendants to walk
255 *
256 * To be used by scx_for_each_descendant_pre(). Find the next descendant to
257 * visit for pre-order traversal of @root's descendants. @root is included in
258 * the iteration and the first node to be visited.
259 */
260 static struct scx_sched *scx_next_descendant_pre(struct scx_sched *pos,
261 struct scx_sched *root)
262 {
263 struct scx_sched *next;
264
265 lockdep_assert(lockdep_is_held(&scx_enable_mutex) ||
266 lockdep_is_held(&scx_sched_lock));
267
268 /* if first iteration, visit @root */
269 if (!pos)
270 return root;
271
272 /* visit the first child if exists */
273 next = list_first_entry_or_null(&pos->children, struct scx_sched, sibling);
274 if (next)
275 return next;
276
277 /* no child, visit my or the closest ancestor's next sibling */
278 while (pos != root) {
279 if (!list_is_last(&pos->sibling, &scx_parent(pos)->children))
280 return list_next_entry(pos, sibling);
281 pos = scx_parent(pos);
282 }
283
284 return NULL;
285 }
286
287 static struct scx_sched *scx_find_sub_sched(u64 cgroup_id)
288 {
289 return rhashtable_lookup(&scx_sched_hash, &cgroup_id,
290 scx_sched_hash_params);
291 }
292
293 static void scx_set_task_sched(struct task_struct *p, struct scx_sched *sch)
294 {
295 rcu_assign_pointer(p->scx.sched, sch);
296 }
297 #else /* CONFIG_EXT_SUB_SCHED */
298 static struct scx_sched *scx_parent(struct scx_sched *sch) { return NULL; }
299 static struct scx_sched *scx_next_descendant_pre(struct scx_sched *pos, struct scx_sched *root) { return pos ? NULL : root; }
300 static struct scx_sched *scx_find_sub_sched(u64 cgroup_id) { return NULL; }
301 static void scx_set_task_sched(struct task_struct *p, struct scx_sched *sch) {}
302 #endif /* CONFIG_EXT_SUB_SCHED */
303
304 /**
305 * scx_is_descendant - Test whether sched is a descendant
306 * @sch: sched to test
307 * @ancestor: ancestor sched to test against
308 *
309 * Test whether @sch is a descendant of @ancestor.
310 */
311 static bool scx_is_descendant(struct scx_sched *sch, struct scx_sched *ancestor)
312 {
313 if (sch->level < ancestor->level)
314 return false;
315 return sch->ancestors[ancestor->level] == ancestor;
316 }
317
318 /**
319 * scx_for_each_descendant_pre - pre-order walk of a sched's descendants
320 * @pos: iteration cursor
321 * @root: sched to walk the descendants of
322 *
323 * Walk @root's descendants. @root is included in the iteration and the first
324 * node to be visited. Must be called with either scx_enable_mutex or
325 * scx_sched_lock held.
326 */
327 #define scx_for_each_descendant_pre(pos, root) \
328 for ((pos) = scx_next_descendant_pre(NULL, (root)); (pos); \
329 (pos) = scx_next_descendant_pre((pos), (root)))
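
/*
 * Illustrative sketch (not part of the build): a typical pre-order walk that
 * visits @root first and then each descendant. visit() is a made-up stand-in
 * for whatever per-sched work the caller needs to do.
 *
 *	struct scx_sched *pos;
 *
 *	lockdep_assert_held(&scx_enable_mutex);
 *	scx_for_each_descendant_pre(pos, root) {
 *		visit(pos);
 *	}
 */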
330
331 static struct scx_dispatch_q *find_global_dsq(struct scx_sched *sch, s32 cpu)
332 {
333 return &sch->pnode[cpu_to_node(cpu)]->global_dsq;
334 }
335
336 static struct scx_dispatch_q *find_user_dsq(struct scx_sched *sch, u64 dsq_id)
337 {
338 return rhashtable_lookup(&sch->dsq_hash, &dsq_id, dsq_hash_params);
339 }
340
341 static const struct sched_class *scx_setscheduler_class(struct task_struct *p)
342 {
343 if (p->sched_class == &stop_sched_class)
344 return &stop_sched_class;
345
346 return __setscheduler_class(p->policy, p->prio);
347 }
348
349 static struct scx_dispatch_q *bypass_dsq(struct scx_sched *sch, s32 cpu)
350 {
351 return &per_cpu_ptr(sch->pcpu, cpu)->bypass_dsq;
352 }
353
354 static struct scx_dispatch_q *bypass_enq_target_dsq(struct scx_sched *sch, s32 cpu)
355 {
356 #ifdef CONFIG_EXT_SUB_SCHED
357 /*
358 * If @sch is a sub-sched which is bypassing, its tasks should go into
359 * the bypass DSQs of the nearest ancestor which is not bypassing. The
360 * not-bypassing ancestor is responsible for scheduling all tasks from
361 * bypassing sub-trees. If all ancestors including root are bypassing,
362 * all tasks should go to the root's bypass DSQs.
363 *
364 * Whenever a sched starts bypassing, all runnable tasks in its subtree
365 * are re-enqueued after scx_bypassing() is turned on, guaranteeing that
366 * all tasks are transferred to the right DSQs.
367 */
368 while (scx_parent(sch) && scx_bypassing(sch, cpu))
369 sch = scx_parent(sch);
370 #endif /* CONFIG_EXT_SUB_SCHED */
371
372 return bypass_dsq(sch, cpu);
373 }
374
375 /**
376 * bypass_dsp_enabled - Check if bypass dispatch path is enabled
377 * @sch: scheduler to check
378 *
379 * When a descendant scheduler enters bypass mode, bypassed tasks are scheduled
380 * by the nearest non-bypassing ancestor, or the root scheduler if all ancestors
381 * are bypassing. In the former case, the ancestor is not itself bypassing but
382 * its bypass DSQs will be populated with bypassed tasks from descendants. Thus,
383 * the ancestor's bypass dispatch path must be active even though its own
384 * bypass_depth remains zero.
385 *
386 * This function checks bypass_dsp_enable_depth which is managed separately from
387 * bypass_depth to enable this decoupling. See enable_bypass_dsp() and
388 * disable_bypass_dsp().
389 */
390 static bool bypass_dsp_enabled(struct scx_sched *sch)
391 {
392 return unlikely(atomic_read(&sch->bypass_dsp_enable_depth));
393 }
394
395 /**
396 * rq_is_open - Is the rq available for immediate execution of an SCX task?
397 * @rq: rq to test
398 * @enq_flags: optional %SCX_ENQ_* of the task being enqueued
399 *
400  * Returns %true if @rq is currently open for executing an SCX task. After a
401  * %false return, @rq is guaranteed to invoke the SCX dispatch path at least
402  * once before going idle, so not inserting a task into @rq's local DSQ after a
403  * %false return doesn't cause @rq to stall.
404 */
405 static bool rq_is_open(struct rq *rq, u64 enq_flags)
406 {
407 lockdep_assert_rq_held(rq);
408
409 /*
410 * A higher-priority class task is either running or in the process of
411 * waking up on @rq.
412 */
413 if (sched_class_above(rq->next_class, &ext_sched_class))
414 return false;
415
416 /*
417 * @rq is either in transition to or in idle and there is no
418 * higher-priority class task waking up on it.
419 */
420 if (sched_class_above(&ext_sched_class, rq->next_class))
421 return true;
422
423 /*
424 * @rq is either picking, in transition to, or running an SCX task.
425 */
426
427 /*
428 * If we're in the dispatch path holding rq lock, $curr may or may not
429 	 * be ready depending on whether the ongoing dispatch decides to extend
430 * $curr's slice. We say yes here and resolve it at the end of dispatch.
431 * See balance_one().
432 */
433 if (rq->scx.flags & SCX_RQ_IN_BALANCE)
434 return true;
435
436 /*
437 * %SCX_ENQ_PREEMPT clears $curr's slice if on SCX and kicks dispatch,
438 * so allow it to avoid spuriously triggering reenq on a combined
439 * PREEMPT|IMMED insertion.
440 */
441 if (enq_flags & SCX_ENQ_PREEMPT)
442 return true;
443
444 /*
445 * @rq is either in transition to or running an SCX task and can't go
446 * idle without another SCX dispatch cycle.
447 */
448 return false;
449 }
450
451 /*
452 * Track the rq currently locked.
453 *
454 * This allows kfuncs to safely operate on rq from any scx ops callback,
455 * knowing which rq is already locked.
456 */
457 DEFINE_PER_CPU(struct rq *, scx_locked_rq_state);
458
459 static inline void update_locked_rq(struct rq *rq)
460 {
461 /*
462 * Check whether @rq is actually locked. This can help expose bugs
463 * or incorrect assumptions about the context in which a kfunc or
464 * callback is executed.
465 */
466 if (rq)
467 lockdep_assert_rq_held(rq);
468 __this_cpu_write(scx_locked_rq_state, rq);
469 }
470
471 /*
472 * SCX ops can recurse via scx_bpf_sub_dispatch() - the inner call must not
473 * clobber the outer's scx_locked_rq_state. Save it on entry, restore on exit.
474 */
475 #define SCX_CALL_OP(sch, op, locked_rq, args...) \
476 do { \
477 struct rq *__prev_locked_rq; \
478 \
479 if (locked_rq) { \
480 __prev_locked_rq = scx_locked_rq(); \
481 update_locked_rq(locked_rq); \
482 } \
483 (sch)->ops.op(args); \
484 if (locked_rq) \
485 update_locked_rq(__prev_locked_rq); \
486 } while (0)
487
488 #define SCX_CALL_OP_RET(sch, op, locked_rq, args...) \
489 ({ \
490 struct rq *__prev_locked_rq; \
491 __typeof__((sch)->ops.op(args)) __ret; \
492 \
493 if (locked_rq) { \
494 __prev_locked_rq = scx_locked_rq(); \
495 update_locked_rq(locked_rq); \
496 } \
497 __ret = (sch)->ops.op(args); \
498 if (locked_rq) \
499 update_locked_rq(__prev_locked_rq); \
500 __ret; \
501 })
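
/*
 * Illustrative sketch (not part of the build): callers pass the rq that is
 * already locked, or NULL when the op runs without an rq lock, followed by the
 * op's own arguments. Ops that take task arguments normally go through the
 * SCX_CALL_OP_TASK*() wrappers below instead.
 *
 *	// an op called with @rq locked
 *	SCX_CALL_OP(sch, cpu_release, rq, cpu_of(rq), &args);
 *
 *	// an op with a return value, called without a locked rq
 *	ret = SCX_CALL_OP_RET(sch, init, NULL);
 */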
502
503 /*
504 * SCX_CALL_OP_TASK*() invokes an SCX op that takes one or two task arguments
505 * and records them in current->scx.kf_tasks[] for the duration of the call. A
506 * kfunc invoked from inside such an op can then use
507 * scx_kf_arg_task_ok() to verify that its task argument is one of
508 * those subject tasks.
509 *
510 * Every SCX_CALL_OP_TASK*() call site invokes its op with @p's rq lock held -
511 * either via the @locked_rq argument here, or (for ops.select_cpu()) via @p's
512 * pi_lock held by try_to_wake_up() with rq tracking via scx_rq.in_select_cpu.
513 * So if kf_tasks[] is set, @p's scheduler-protected fields are stable.
514 *
515  * kf_tasks[] cannot stack, so task-based SCX ops must not nest. The
516 * WARN_ON_ONCE() in each macro catches a re-entry of any of the three variants
517 * while a previous one is still in progress.
518 */
519 #define SCX_CALL_OP_TASK(sch, op, locked_rq, task, args...) \
520 do { \
521 WARN_ON_ONCE(current->scx.kf_tasks[0]); \
522 current->scx.kf_tasks[0] = task; \
523 SCX_CALL_OP((sch), op, locked_rq, task, ##args); \
524 current->scx.kf_tasks[0] = NULL; \
525 } while (0)
526
527 #define SCX_CALL_OP_TASK_RET(sch, op, locked_rq, task, args...) \
528 ({ \
529 __typeof__((sch)->ops.op(task, ##args)) __ret; \
530 WARN_ON_ONCE(current->scx.kf_tasks[0]); \
531 current->scx.kf_tasks[0] = task; \
532 __ret = SCX_CALL_OP_RET((sch), op, locked_rq, task, ##args); \
533 current->scx.kf_tasks[0] = NULL; \
534 __ret; \
535 })
536
537 #define SCX_CALL_OP_2TASKS_RET(sch, op, locked_rq, task0, task1, args...) \
538 ({ \
539 __typeof__((sch)->ops.op(task0, task1, ##args)) __ret; \
540 WARN_ON_ONCE(current->scx.kf_tasks[0]); \
541 current->scx.kf_tasks[0] = task0; \
542 current->scx.kf_tasks[1] = task1; \
543 __ret = SCX_CALL_OP_RET((sch), op, locked_rq, task0, task1, ##args); \
544 current->scx.kf_tasks[0] = NULL; \
545 current->scx.kf_tasks[1] = NULL; \
546 __ret; \
547 })
548
549 /* see SCX_CALL_OP_TASK() */
550 static __always_inline bool scx_kf_arg_task_ok(struct scx_sched *sch,
551 struct task_struct *p)
552 {
553 if (unlikely((p != current->scx.kf_tasks[0] &&
554 p != current->scx.kf_tasks[1]))) {
555 scx_error(sch, "called on a task not being operated on");
556 return false;
557 }
558
559 return true;
560 }
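
/*
 * Illustrative sketch (not part of the build): a kfunc reachable from the
 * task-argument ops above would validate its task pointer against kf_tasks[]
 * before touching scheduler-protected state. scx_bpf_frob_task() is a
 * hypothetical kfunc name used only for this example.
 *
 *	__bpf_kfunc void scx_bpf_frob_task(struct task_struct *p)
 *	{
 *		struct scx_sched *sch;
 *
 *		sch = rcu_dereference(scx_root);
 *		if (!sch || !scx_kf_arg_task_ok(sch, p))
 *			return;
 *		// @p is one of current->scx.kf_tasks[] and its rq is locked
 *	}
 */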
561
562 enum scx_dsq_iter_flags {
563 /* iterate in the reverse dispatch order */
564 SCX_DSQ_ITER_REV = 1U << 16,
565
566 __SCX_DSQ_ITER_HAS_SLICE = 1U << 30,
567 __SCX_DSQ_ITER_HAS_VTIME = 1U << 31,
568
569 __SCX_DSQ_ITER_USER_FLAGS = SCX_DSQ_ITER_REV,
570 __SCX_DSQ_ITER_ALL_FLAGS = __SCX_DSQ_ITER_USER_FLAGS |
571 __SCX_DSQ_ITER_HAS_SLICE |
572 __SCX_DSQ_ITER_HAS_VTIME,
573 };
574
575 /**
576 * nldsq_next_task - Iterate to the next task in a non-local DSQ
577 * @dsq: non-local dsq being iterated
578 * @cur: current position, %NULL to start iteration
579 * @rev: walk backwards
580 *
581 * Returns %NULL when iteration is finished.
582 */
583 static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
584 struct task_struct *cur, bool rev)
585 {
586 struct list_head *list_node;
587 struct scx_dsq_list_node *dsq_lnode;
588
589 lockdep_assert_held(&dsq->lock);
590
591 if (cur)
592 list_node = &cur->scx.dsq_list.node;
593 else
594 list_node = &dsq->list;
595
596 /* find the next task, need to skip BPF iteration cursors */
597 do {
598 if (rev)
599 list_node = list_node->prev;
600 else
601 list_node = list_node->next;
602
603 if (list_node == &dsq->list)
604 return NULL;
605
606 dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
607 node);
608 } while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);
609
610 return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
611 }
612
613 #define nldsq_for_each_task(p, dsq) \
614 for ((p) = nldsq_next_task((dsq), NULL, false); (p); \
615 (p) = nldsq_next_task((dsq), (p), false))
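
/*
 * Illustrative sketch (not part of the build): walking a non-local DSQ under
 * its lock, e.g. to count queued tasks whose vtime is below a made-up
 * @vtime_limit.
 *
 *	struct task_struct *p;
 *	u32 nr = 0;
 *
 *	raw_spin_lock(&dsq->lock);
 *	nldsq_for_each_task(p, dsq) {
 *		if (time_before64(p->scx.dsq_vtime, vtime_limit))
 *			nr++;
 *	}
 *	raw_spin_unlock(&dsq->lock);
 */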
616
617 /**
618 * nldsq_cursor_next_task - Iterate to the next task given a cursor in a non-local DSQ
619 * @cursor: scx_dsq_list_node initialized with INIT_DSQ_LIST_CURSOR()
620 * @dsq: non-local dsq being iterated
621 *
622 * Find the next task in a cursor based iteration. The caller must have
623 * initialized @cursor using INIT_DSQ_LIST_CURSOR() and can release the DSQ lock
624 * between the iteration steps.
625 *
626 * Only tasks which were queued before @cursor was initialized are visible. This
627 * bounds the iteration and guarantees that vtime never jumps in the other
628 * direction while iterating.
629 */
630 static struct task_struct *nldsq_cursor_next_task(struct scx_dsq_list_node *cursor,
631 struct scx_dispatch_q *dsq)
632 {
633 bool rev = cursor->flags & SCX_DSQ_ITER_REV;
634 struct task_struct *p;
635
636 lockdep_assert_held(&dsq->lock);
637 BUG_ON(!(cursor->flags & SCX_DSQ_LNODE_ITER_CURSOR));
638
639 if (list_empty(&cursor->node))
640 p = NULL;
641 else
642 p = container_of(cursor, struct task_struct, scx.dsq_list);
643
644 /* skip cursors and tasks that were queued after @cursor init */
645 do {
646 p = nldsq_next_task(dsq, p, rev);
647 } while (p && unlikely(u32_before(cursor->priv, p->scx.dsq_seq)));
648
649 if (p) {
650 if (rev)
651 list_move_tail(&cursor->node, &p->scx.dsq_list.node);
652 else
653 list_move(&cursor->node, &p->scx.dsq_list.node);
654 } else {
655 list_del_init(&cursor->node);
656 }
657
658 return p;
659 }
660
661 /**
662 * nldsq_cursor_lost_task - Test whether someone else took the task since iteration
663 * @cursor: scx_dsq_list_node initialized with INIT_DSQ_LIST_CURSOR()
664 * @rq: rq @p was on
665 * @dsq: dsq @p was on
666 * @p: target task
667 *
668 * @p is a task returned by nldsq_cursor_next_task(). The locks may have been
669  * dropped and re-acquired in between. Verify that no one else took or is in the
670 * process of taking @p from @dsq.
671 *
672 * On %false return, the caller can assume full ownership of @p.
673 */
674 static bool nldsq_cursor_lost_task(struct scx_dsq_list_node *cursor,
675 struct rq *rq, struct scx_dispatch_q *dsq,
676 struct task_struct *p)
677 {
678 lockdep_assert_rq_held(rq);
679 lockdep_assert_held(&dsq->lock);
680
681 /*
682 	 * @p could have already left $src_dsq, got re-enqueued, or be in the
683 * process of being consumed by someone else.
684 */
685 if (unlikely(p->scx.dsq != dsq ||
686 u32_before(cursor->priv, p->scx.dsq_seq) ||
687 p->scx.holding_cpu >= 0))
688 return true;
689
690 /* if @p has stayed on @dsq, its rq couldn't have changed */
691 if (WARN_ON_ONCE(rq != task_rq(p)))
692 return true;
693
694 return false;
695 }
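
/*
 * Illustrative sketch (not part of the build) of the cursor pair above: the
 * DSQ lock may be dropped between steps, and ownership of the returned task
 * must be re-verified before acting on it. The INIT_DSQ_LIST_CURSOR()
 * argument list is elided here; see its definition.
 *
 *	struct scx_dsq_list_node cursor = INIT_DSQ_LIST_CURSOR(...);
 *	struct task_struct *p;
 *
 *	raw_spin_lock(&dsq->lock);
 *	while ((p = nldsq_cursor_next_task(&cursor, dsq))) {
 *		// dsq->lock may be dropped and re-taken here; with @p's rq
 *		// and dsq->lock held again, check that @p wasn't taken away
 *		if (nldsq_cursor_lost_task(&cursor, task_rq(p), dsq, p))
 *			continue;
 *		// full ownership of @p
 *	}
 *	raw_spin_unlock(&dsq->lock);
 */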
696
697 /*
698 * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
699 * dispatch order. BPF-visible iterator is opaque and larger to allow future
700 * changes without breaking backward compatibility. Can be used with
701 * bpf_for_each(). See bpf_iter_scx_dsq_*().
702 */
703 struct bpf_iter_scx_dsq_kern {
704 struct scx_dsq_list_node cursor;
705 struct scx_dispatch_q *dsq;
706 u64 slice;
707 u64 vtime;
708 } __attribute__((aligned(8)));
709
710 struct bpf_iter_scx_dsq {
711 u64 __opaque[6];
712 } __attribute__((aligned(8)));
713
714
715 /*
716 * SCX task iterator.
717 */
718 struct scx_task_iter {
719 struct sched_ext_entity cursor;
720 struct task_struct *locked_task;
721 struct rq *rq;
722 struct rq_flags rf;
723 u32 cnt;
724 bool list_locked;
725 #ifdef CONFIG_EXT_SUB_SCHED
726 struct cgroup *cgrp;
727 struct cgroup_subsys_state *css_pos;
728 struct css_task_iter css_iter;
729 #endif
730 };
731
732 /**
733 * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration
734 * @iter: iterator to init
735 * @cgrp: Optional root of cgroup subhierarchy to iterate
736 *
737 * Initialize @iter. Once initialized, @iter must eventually be stopped with
738 * scx_task_iter_stop().
739 *
740 * If @cgrp is %NULL, scx_tasks is used for iteration and this function returns
741 * with scx_tasks_lock held and @iter->cursor inserted into scx_tasks.
742 *
743 * If @cgrp is not %NULL, @cgrp and its descendants' tasks are walked using
744 * @iter->css_iter. The caller must be holding cgroup_lock() to prevent cgroup
745 * task migrations.
746 *
747  * The two modes of iteration are largely independent and it's likely that
748 * scx_tasks can be removed in favor of always using cgroup iteration if
749 * CONFIG_SCHED_CLASS_EXT depends on CONFIG_CGROUPS.
750 *
751 * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock()
752 * between this and the first next() call or between any two next() calls. If
753 * the locks are released between two next() calls, the caller is responsible
754 * for ensuring that the task being iterated remains accessible either through
755 * RCU read lock or obtaining a reference count.
756 *
757 * All tasks which existed when the iteration started are guaranteed to be
758 * visited as long as they are not dead.
759 */
760 static void scx_task_iter_start(struct scx_task_iter *iter, struct cgroup *cgrp)
761 {
762 memset(iter, 0, sizeof(*iter));
763
764 #ifdef CONFIG_EXT_SUB_SCHED
765 if (cgrp) {
766 lockdep_assert_held(&cgroup_mutex);
767 iter->cgrp = cgrp;
768 iter->css_pos = css_next_descendant_pre(NULL, &iter->cgrp->self);
769 css_task_iter_start(iter->css_pos, CSS_TASK_ITER_WITH_DEAD,
770 &iter->css_iter);
771 return;
772 }
773 #endif
774 raw_spin_lock_irq(&scx_tasks_lock);
775
776 iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
777 list_add(&iter->cursor.tasks_node, &scx_tasks);
778 iter->list_locked = true;
779 }
780
781 static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
782 {
783 if (iter->locked_task) {
784 __balance_callbacks(iter->rq, &iter->rf);
785 task_rq_unlock(iter->rq, iter->locked_task, &iter->rf);
786 iter->locked_task = NULL;
787 }
788 }
789
790 /**
791 * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator
792 * @iter: iterator to unlock
793 *
794 * If @iter is in the middle of a locked iteration, it may be locking the rq of
795 * the task currently being visited in addition to scx_tasks_lock. Unlock both.
796 * This function can be safely called anytime during an iteration. The next
797 * iterator operation will automatically restore the necessary locking.
798 */
799 static void scx_task_iter_unlock(struct scx_task_iter *iter)
800 {
801 __scx_task_iter_rq_unlock(iter);
802 if (iter->list_locked) {
803 iter->list_locked = false;
804 raw_spin_unlock_irq(&scx_tasks_lock);
805 }
806 }
807
808 static void __scx_task_iter_maybe_relock(struct scx_task_iter *iter)
809 {
810 if (!iter->list_locked) {
811 raw_spin_lock_irq(&scx_tasks_lock);
812 iter->list_locked = true;
813 }
814 }
815
816 /**
817 * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
818 * @iter: iterator to exit
819 *
820 * Exit a previously initialized @iter. Must be called with scx_tasks_lock held
821 * which is released on return. If the iterator holds a task's rq lock, that rq
822 * lock is also released. See scx_task_iter_start() for details.
823 */
824 static void scx_task_iter_stop(struct scx_task_iter *iter)
825 {
826 #ifdef CONFIG_EXT_SUB_SCHED
827 if (iter->cgrp) {
828 if (iter->css_pos)
829 css_task_iter_end(&iter->css_iter);
830 __scx_task_iter_rq_unlock(iter);
831 return;
832 }
833 #endif
834 __scx_task_iter_maybe_relock(iter);
835 list_del_init(&iter->cursor.tasks_node);
836 scx_task_iter_unlock(iter);
837 }
838
839 /**
840 * scx_task_iter_next - Next task
841 * @iter: iterator to walk
842 *
843 * Visit the next task. See scx_task_iter_start() for details. Locks are dropped
844 * and re-acquired every %SCX_TASK_ITER_BATCH iterations to avoid causing stalls
845 * by holding scx_tasks_lock for too long.
846 */
847 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
848 {
849 struct list_head *cursor = &iter->cursor.tasks_node;
850 struct sched_ext_entity *pos;
851
852 if (!(++iter->cnt % SCX_TASK_ITER_BATCH)) {
853 scx_task_iter_unlock(iter);
854 cond_resched();
855 }
856
857 #ifdef CONFIG_EXT_SUB_SCHED
858 if (iter->cgrp) {
859 while (iter->css_pos) {
860 struct task_struct *p;
861
862 p = css_task_iter_next(&iter->css_iter);
863 if (p)
864 return p;
865
866 css_task_iter_end(&iter->css_iter);
867 iter->css_pos = css_next_descendant_pre(iter->css_pos,
868 &iter->cgrp->self);
869 if (iter->css_pos)
870 css_task_iter_start(iter->css_pos, CSS_TASK_ITER_WITH_DEAD,
871 &iter->css_iter);
872 }
873 return NULL;
874 }
875 #endif
876 __scx_task_iter_maybe_relock(iter);
877
878 list_for_each_entry(pos, cursor, tasks_node) {
879 if (&pos->tasks_node == &scx_tasks)
880 return NULL;
881 if (!(pos->flags & SCX_TASK_CURSOR)) {
882 list_move(cursor, &pos->tasks_node);
883 return container_of(pos, struct task_struct, scx);
884 }
885 }
886
887 /* can't happen, should always terminate at scx_tasks above */
888 BUG();
889 }
890
891 /**
892 * scx_task_iter_next_locked - Next non-idle task with its rq locked
893 * @iter: iterator to walk
894 *
895  * Visit the next non-idle task with its rq lock held. Tasks which have
896  * already gone through sched_ext_dead() are skipped. See
897  * scx_task_iter_start() for details.
898 */
899 static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
900 {
901 struct task_struct *p;
902
903 __scx_task_iter_rq_unlock(iter);
904
905 while ((p = scx_task_iter_next(iter))) {
906 /*
907 * scx_task_iter is used to prepare and move tasks into SCX
908 * while loading the BPF scheduler and vice-versa while
909 * unloading. The init_tasks ("swappers") should be excluded
910 * from the iteration because:
911 *
912 		 * - It's unsafe to use __setscheduler_class() on an init_task to
913 * determine the sched_class to use as it won't preserve its
914 * idle_sched_class.
915 *
916 * - ops.init/exit_task() can easily be confused if called with
917 * init_tasks as they, e.g., share PID 0.
918 *
919 * As init_tasks are never scheduled through SCX, they can be
920 * skipped safely. Note that is_idle_task() which tests %PF_IDLE
921 * doesn't work here:
922 *
923 * - %PF_IDLE may not be set for an init_task whose CPU hasn't
924 * yet been onlined.
925 *
926 * - %PF_IDLE can be set on tasks that are not init_tasks. See
927 * play_idle_precise() used by CONFIG_IDLE_INJECT.
928 *
929 * Test for idle_sched_class as only init_tasks are on it.
930 */
931 if (p->sched_class == &idle_sched_class)
932 continue;
933
934 iter->rq = task_rq_lock(p, &iter->rf);
935 iter->locked_task = p;
936
937 /*
938 * cgroup_task_dead() removes the dead tasks from cset->tasks
939 * after sched_ext_dead() and cgroup iteration may see tasks
940 * which already finished sched_ext_dead(). %SCX_TASK_OFF_TASKS
941 * is set by sched_ext_dead() under @p's rq lock. Test it to
942 * avoid visiting tasks which are already dead from SCX POV.
943 */
944 if (p->scx.flags & SCX_TASK_OFF_TASKS) {
945 __scx_task_iter_rq_unlock(iter);
946 continue;
947 }
948
949 return p;
950 }
951 return NULL;
952 }
953
954 /**
955 * scx_add_event - Increase an event counter for 'name' by 'cnt'
956 * @sch: scx_sched to account events for
957 * @name: an event name defined in struct scx_event_stats
958  * @cnt: the number of times the event occurred
959 *
960 * This can be used when preemption is not disabled.
961 */
962 #define scx_add_event(sch, name, cnt) do { \
963 this_cpu_add((sch)->pcpu->event_stats.name, (cnt)); \
964 trace_sched_ext_event(#name, (cnt)); \
965 } while (0)
966
967 /**
968 * __scx_add_event - Increase an event counter for 'name' by 'cnt'
969 * @sch: scx_sched to account events for
970 * @name: an event name defined in struct scx_event_stats
971  * @cnt: the number of times the event occurred
972 *
973 * This should be used only when preemption is disabled.
974 */
975 #define __scx_add_event(sch, name, cnt) do { \
976 __this_cpu_add((sch)->pcpu->event_stats.name, (cnt)); \
977 	trace_sched_ext_event(#name, (cnt));					\
978 } while (0)
979
980 /**
981 * scx_agg_event - Aggregate an event counter 'kind' from 'src_e' to 'dst_e'
982 * @dst_e: destination event stats
983 * @src_e: source event stats
984 * @kind: a kind of event to be aggregated
985 */
986 #define scx_agg_event(dst_e, src_e, kind) do { \
987 (dst_e)->kind += READ_ONCE((src_e)->kind); \
988 } while (0)
989
990 /**
991 * scx_dump_event - Dump an event 'kind' in 'events' to 's'
992 * @s: output seq_buf
993 * @events: event stats
994 * @kind: a kind of event to dump
995 */
996 #define scx_dump_event(s, events, kind) do { \
997 dump_line(&(s), "%40s: %16lld", #kind, (events)->kind); \
998 } while (0)
999
1000
1001 static void scx_read_events(struct scx_sched *sch,
1002 struct scx_event_stats *events);
1003
1004 static enum scx_enable_state scx_enable_state(void)
1005 {
1006 return atomic_read(&scx_enable_state_var);
1007 }
1008
1009 static enum scx_enable_state scx_set_enable_state(enum scx_enable_state to)
1010 {
1011 return atomic_xchg(&scx_enable_state_var, to);
1012 }
1013
1014 static bool scx_tryset_enable_state(enum scx_enable_state to,
1015 enum scx_enable_state from)
1016 {
1017 int from_v = from;
1018
1019 return atomic_try_cmpxchg(&scx_enable_state_var, &from_v, to);
1020 }
1021
1022 /**
1023 * wait_ops_state - Busy-wait the specified ops state to end
1024 * @p: target task
1025 * @opss: state to wait the end of
1026 *
1027 * Busy-wait for @p to transition out of @opss. This can only be used when the
1028 * state part of @opss is %SCX_QUEUEING or %SCX_DISPATCHING. This function also
1029 * has load_acquire semantics to ensure that the caller can see the updates made
1030 * in the enqueueing and dispatching paths.
1031 */
1032 static void wait_ops_state(struct task_struct *p, unsigned long opss)
1033 {
1034 do {
1035 cpu_relax();
1036 } while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
1037 }
1038
1039 static inline bool __cpu_valid(s32 cpu)
1040 {
1041 return likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu));
1042 }
1043
1044 /**
1045 * ops_cpu_valid - Verify a cpu number, to be used on ops input args
1046 * @sch: scx_sched to abort on error
1047 * @cpu: cpu number which came from a BPF ops
1048 * @where: extra information reported on error
1049 *
1050 * @cpu is a cpu number which came from the BPF scheduler and can be any value.
1051 * Verify that it is in range and one of the possible cpus. If invalid, trigger
1052 * an ops error.
1053 */
1054 static bool ops_cpu_valid(struct scx_sched *sch, s32 cpu, const char *where)
1055 {
1056 if (__cpu_valid(cpu)) {
1057 return true;
1058 } else {
1059 scx_error(sch, "invalid CPU %d%s%s", cpu, where ? " " : "", where ?: "");
1060 return false;
1061 }
1062 }
1063
1064 /**
1065 * ops_sanitize_err - Sanitize a -errno value
1066 * @sch: scx_sched to error out on error
1067 * @ops_name: operation to blame on failure
1068 * @err: -errno value to sanitize
1069 *
1070 * Verify @err is a valid -errno. If not, trigger scx_error() and return
1071 * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
1072  * cause misbehaviors. For example, a large negative return from
1073 * ops.init_task() triggers an oops when passed up the call chain because the
1074 * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
1075 * handled as a pointer.
1076 */
1077 static int ops_sanitize_err(struct scx_sched *sch, const char *ops_name, s32 err)
1078 {
1079 if (err < 0 && err >= -MAX_ERRNO)
1080 return err;
1081
1082 scx_error(sch, "ops.%s() returned an invalid errno %d", ops_name, err);
1083 return -EPROTO;
1084 }
1085
1086 static void deferred_bal_cb_workfn(struct rq *rq)
1087 {
1088 run_deferred(rq);
1089 }
1090
1091 static void deferred_irq_workfn(struct irq_work *irq_work)
1092 {
1093 struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
1094
1095 raw_spin_rq_lock(rq);
1096 run_deferred(rq);
1097 raw_spin_rq_unlock(rq);
1098 }
1099
1100 /**
1101 * schedule_deferred - Schedule execution of deferred actions on an rq
1102 * @rq: target rq
1103 *
1104 * Schedule execution of deferred actions on @rq. Deferred actions are executed
1105 * with @rq locked but unpinned, and thus can unlock @rq to e.g. migrate tasks
1106 * to other rqs.
1107 */
1108 static void schedule_deferred(struct rq *rq)
1109 {
1110 /*
1111 * This is the fallback when schedule_deferred_locked() can't use
1112 * the cheaper balance callback or wakeup hook paths (the target
1113 * CPU is not in balance or wakeup). Currently, this is primarily
1114 * hit by reenqueue operations targeting a remote CPU.
1115 *
1116 * Queue on the target CPU. The deferred work can run from any CPU
1117 * correctly - the _locked() path already processes remote rqs from
1118 * the calling CPU - but targeting the owning CPU allows IPI delivery
1119 * without waiting for the calling CPU to re-enable IRQs and is
1120 * cheaper as the reenqueue runs locally.
1121 */
1122 irq_work_queue_on(&rq->scx.deferred_irq_work, cpu_of(rq));
1123 }
1124
1125 /**
1126 * schedule_deferred_locked - Schedule execution of deferred actions on an rq
1127 * @rq: target rq
1128 *
1129 * Schedule execution of deferred actions on @rq. Equivalent to
1130 * schedule_deferred() but requires @rq to be locked and can be more efficient.
1131 */
1132 static void schedule_deferred_locked(struct rq *rq)
1133 {
1134 lockdep_assert_rq_held(rq);
1135
1136 /*
1137 * If in the middle of waking up a task, task_woken_scx() will be called
1138 * afterwards which will then run the deferred actions, no need to
1139 * schedule anything.
1140 */
1141 if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
1142 return;
1143
1144 /* Don't do anything if there already is a deferred operation. */
1145 if (rq->scx.flags & SCX_RQ_BAL_CB_PENDING)
1146 return;
1147
1148 /*
1149 * If in balance, the balance callbacks will be called before rq lock is
1150 * released. Schedule one.
1151 	 *
1152 	 * We can't directly insert the callback into the rq's list: The call can
1153 	 * drop its lock and make the pending balance callback visible to unrelated
1154 	 * code paths that call rq_pin_lock().
1156 *
1157 * Just let balance_one() know that it must do it itself.
1158 */
1159 if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
1160 rq->scx.flags |= SCX_RQ_BAL_CB_PENDING;
1161 return;
1162 }
1163
1164 /*
1165 * No scheduler hooks available. Use the generic irq_work path. The
1166 * above WAKEUP and BALANCE paths should cover most of the cases and the
1167 * time to IRQ re-enable shouldn't be long.
1168 */
1169 schedule_deferred(rq);
1170 }
1171
1172 static void schedule_dsq_reenq(struct scx_sched *sch, struct scx_dispatch_q *dsq,
1173 u64 reenq_flags, struct rq *locked_rq)
1174 {
1175 struct rq *rq;
1176
1177 /*
1178 	 * Allowing reenqueues doesn't make sense while bypassing. This also
1179 	 * prevents new reenqueues from being scheduled on dead scheds.
1180 */
1181 if (unlikely(READ_ONCE(sch->bypass_depth)))
1182 return;
1183
1184 if (dsq->id == SCX_DSQ_LOCAL) {
1185 rq = container_of(dsq, struct rq, scx.local_dsq);
1186
1187 struct scx_sched_pcpu *sch_pcpu = per_cpu_ptr(sch->pcpu, cpu_of(rq));
1188 struct scx_deferred_reenq_local *drl = &sch_pcpu->deferred_reenq_local;
1189
1190 /*
1191 * Pairs with smp_mb() in process_deferred_reenq_locals() and
1192 * guarantees that there is a reenq_local() afterwards.
1193 */
1194 smp_mb();
1195
1196 if (list_empty(&drl->node) ||
1197 (READ_ONCE(drl->flags) & reenq_flags) != reenq_flags) {
1198
1199 guard(raw_spinlock_irqsave)(&rq->scx.deferred_reenq_lock);
1200
1201 if (list_empty(&drl->node))
1202 list_move_tail(&drl->node, &rq->scx.deferred_reenq_locals);
1203 WRITE_ONCE(drl->flags, drl->flags | reenq_flags);
1204 }
1205 } else if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN)) {
1206 rq = this_rq();
1207
1208 struct scx_dsq_pcpu *dsq_pcpu = per_cpu_ptr(dsq->pcpu, cpu_of(rq));
1209 struct scx_deferred_reenq_user *dru = &dsq_pcpu->deferred_reenq_user;
1210
1211 /*
1212 * Pairs with smp_mb() in process_deferred_reenq_users() and
1213 * guarantees that there is a reenq_user() afterwards.
1214 */
1215 smp_mb();
1216
1217 if (list_empty(&dru->node) ||
1218 (READ_ONCE(dru->flags) & reenq_flags) != reenq_flags) {
1219
1220 guard(raw_spinlock_irqsave)(&rq->scx.deferred_reenq_lock);
1221
1222 if (list_empty(&dru->node))
1223 list_move_tail(&dru->node, &rq->scx.deferred_reenq_users);
1224 WRITE_ONCE(dru->flags, dru->flags | reenq_flags);
1225 }
1226 } else {
1227 scx_error(sch, "DSQ 0x%llx not allowed for reenq", dsq->id);
1228 return;
1229 }
1230
1231 if (rq == locked_rq)
1232 schedule_deferred_locked(rq);
1233 else
1234 schedule_deferred(rq);
1235 }
1236
1237 static void schedule_reenq_local(struct rq *rq, u64 reenq_flags)
1238 {
1239 struct scx_sched *root = rcu_dereference_sched(scx_root);
1240
1241 if (WARN_ON_ONCE(!root))
1242 return;
1243
1244 schedule_dsq_reenq(root, &rq->scx.local_dsq, reenq_flags, rq);
1245 }
1246
1247 /**
1248 * touch_core_sched - Update timestamp used for core-sched task ordering
1249 * @rq: rq to read clock from, must be locked
1250 * @p: task to update the timestamp for
1251 *
1252 * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
1253 * implement global or local-DSQ FIFO ordering for core-sched. Should be called
1254 * when a task becomes runnable and its turn on the CPU ends (e.g. slice
1255 * exhaustion).
1256 */
1257 static void touch_core_sched(struct rq *rq, struct task_struct *p)
1258 {
1259 lockdep_assert_rq_held(rq);
1260
1261 #ifdef CONFIG_SCHED_CORE
1262 /*
1263 * It's okay to update the timestamp spuriously. Use
1264 * sched_core_disabled() which is cheaper than enabled().
1265 *
1266 * As this is used to determine ordering between tasks of sibling CPUs,
1267 * it may be better to use per-core dispatch sequence instead.
1268 */
1269 if (!sched_core_disabled())
1270 p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
1271 #endif
1272 }
1273
1274 /**
1275 * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
1276 * @rq: rq to read clock from, must be locked
1277 * @p: task being dispatched
1278 *
1279 * If the BPF scheduler implements custom core-sched ordering via
1280 * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
1281 * ordering within each local DSQ. This function is called from dispatch paths
1282 * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
1283 */
1284 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
1285 {
1286 lockdep_assert_rq_held(rq);
1287
1288 #ifdef CONFIG_SCHED_CORE
1289 if (unlikely(SCX_HAS_OP(scx_root, core_sched_before)))
1290 touch_core_sched(rq, p);
1291 #endif
1292 }
1293
1294 static void update_curr_scx(struct rq *rq)
1295 {
1296 struct task_struct *curr = rq->curr;
1297 s64 delta_exec;
1298
1299 delta_exec = update_curr_common(rq);
1300 if (unlikely(delta_exec <= 0))
1301 return;
1302
1303 if (curr->scx.slice != SCX_SLICE_INF) {
1304 curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
1305 if (!curr->scx.slice)
1306 touch_core_sched(rq, curr);
1307 }
1308
1309 dl_server_update(&rq->ext_server, delta_exec);
1310 }
1311
1312 static bool scx_dsq_priq_less(struct rb_node *node_a,
1313 const struct rb_node *node_b)
1314 {
1315 const struct task_struct *a =
1316 container_of(node_a, struct task_struct, scx.dsq_priq);
1317 const struct task_struct *b =
1318 container_of(node_b, struct task_struct, scx.dsq_priq);
1319
1320 return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
1321 }
1322
1323 static void dsq_inc_nr(struct scx_dispatch_q *dsq, struct task_struct *p, u64 enq_flags)
1324 {
1325 /* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
1326 WRITE_ONCE(dsq->nr, dsq->nr + 1);
1327
1328 /*
1329 * Once @p reaches a local DSQ, it can only leave it by being dispatched
1330 * to the CPU or dequeued. In both cases, the only way @p can go back to
1331 * the BPF sched is through enqueueing. If being inserted into a local
1332 * DSQ with IMMED, persist the state until the next enqueueing event in
1333 * do_enqueue_task() so that we can maintain IMMED protection through
1334 * e.g. SAVE/RESTORE cycles and slice extensions.
1335 */
1336 if (enq_flags & SCX_ENQ_IMMED) {
1337 if (unlikely(dsq->id != SCX_DSQ_LOCAL)) {
1338 WARN_ON_ONCE(!(enq_flags & SCX_ENQ_GDSQ_FALLBACK));
1339 return;
1340 }
1341 p->scx.flags |= SCX_TASK_IMMED;
1342 }
1343
1344 if (p->scx.flags & SCX_TASK_IMMED) {
1345 struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
1346
1347 if (WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
1348 return;
1349
1350 rq->scx.nr_immed++;
1351
1352 /*
1353 * If @rq already had other tasks or the current task is not
1354 * done yet, @p can't go on the CPU immediately. Re-enqueue.
1355 */
1356 if (unlikely(dsq->nr > 1 || !rq_is_open(rq, enq_flags)))
1357 schedule_reenq_local(rq, 0);
1358 }
1359 }
1360
1361 static void dsq_dec_nr(struct scx_dispatch_q *dsq, struct task_struct *p)
1362 {
1363 /* see dsq_inc_nr() */
1364 WRITE_ONCE(dsq->nr, dsq->nr - 1);
1365
1366 if (p->scx.flags & SCX_TASK_IMMED) {
1367 struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
1368
1369 if (WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL) ||
1370 WARN_ON_ONCE(rq->scx.nr_immed <= 0))
1371 return;
1372
1373 rq->scx.nr_immed--;
1374 }
1375 }
1376
1377 static void refill_task_slice_dfl(struct scx_sched *sch, struct task_struct *p)
1378 {
1379 p->scx.slice = READ_ONCE(sch->slice_dfl);
1380 __scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1);
1381 }
1382
1383 /*
1384 * Return true if @p is moving due to an internal SCX migration, false
1385 * otherwise.
1386 */
1387 static inline bool task_scx_migrating(struct task_struct *p)
1388 {
1389 /*
1390 * We only need to check sticky_cpu: it is set to the destination
1391 * CPU in move_remote_task_to_local_dsq() before deactivate_task()
1392 * and cleared when the task is enqueued on the destination, so it
1393 * is only non-negative during an internal SCX migration.
1394 */
1395 return p->scx.sticky_cpu >= 0;
1396 }
1397
1398 /*
1399 * Call ops.dequeue() if the task is in BPF custody and not migrating.
1400 * Clears %SCX_TASK_IN_CUSTODY when the callback is invoked.
1401 */
1402 static void call_task_dequeue(struct scx_sched *sch, struct rq *rq,
1403 struct task_struct *p, u64 deq_flags)
1404 {
1405 if (!(p->scx.flags & SCX_TASK_IN_CUSTODY) || task_scx_migrating(p))
1406 return;
1407
1408 if (SCX_HAS_OP(sch, dequeue))
1409 SCX_CALL_OP_TASK(sch, dequeue, rq, p, deq_flags);
1410
1411 p->scx.flags &= ~SCX_TASK_IN_CUSTODY;
1412 }
1413
1414 static void local_dsq_post_enq(struct scx_sched *sch, struct scx_dispatch_q *dsq,
1415 struct task_struct *p, u64 enq_flags)
1416 {
1417 struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
1418
1419 call_task_dequeue(sch, rq, p, 0);
1420
1421 /*
1422 * Note that @rq's lock may be dropped between this enqueue and @p
1423 * actually getting on CPU. This gives higher-class tasks (e.g. RT)
1424 * an opportunity to wake up on @rq and prevent @p from running.
1425 * Here are some concrete examples:
1426 *
1427 * Example 1:
1428 *
1429 * We dispatch two tasks from a single ops.dispatch():
1430 * - First, a local task to this CPU's local DSQ;
1431 * - Second, a local/remote task to a remote CPU's local DSQ.
1432 * We must drop the local rq lock in order to finish the second
1433 * dispatch. In that time, an RT task can wake up on the local rq.
1434 *
1435 * Example 2:
1436 *
1437 * We dispatch a local/remote task to a remote CPU's local DSQ.
1438 * We must drop the remote rq lock before the dispatched task can run,
1439 * which gives an RT task an opportunity to wake up on the remote rq.
1440 *
1441 * Both examples work the same if we replace dispatching with moving
1442 * the tasks from a user-created DSQ.
1443 *
1444 * We must detect these wakeups so that we can re-enqueue IMMED tasks
1445 * from @rq's local DSQ. scx_wakeup_preempt() serves exactly this
1446 * purpose, but for it to be invoked, we must ensure that we bump
1447 * @rq->next_class to &ext_sched_class if it's currently idle.
1448 *
1449 * wakeup_preempt() does the bumping, and since we only invoke it if
1450 * @rq->next_class is below &ext_sched_class, it will also
1451 * resched_curr(rq).
1452 */
1453 if (sched_class_above(p->sched_class, rq->next_class))
1454 wakeup_preempt(rq, p, 0);
1455
1456 /*
1457 * If @rq is in balance, the CPU is already vacant and looking for the
1458 * next task to run. No need to preempt or trigger resched after moving
1459 * @p into its local DSQ.
1460 * Note that the wakeup_preempt() above may have already triggered
1461 * a resched if @rq->next_class was idle. It's harmless, since
1462 * need_resched is cleared immediately after task pick.
1463 */
1464 if (rq->scx.flags & SCX_RQ_IN_BALANCE)
1465 return;
1466
1467 if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
1468 rq->curr->sched_class == &ext_sched_class) {
1469 rq->curr->scx.slice = 0;
1470 resched_curr(rq);
1471 }
1472 }
1473
1474 static void dispatch_enqueue(struct scx_sched *sch, struct rq *rq,
1475 struct scx_dispatch_q *dsq, struct task_struct *p,
1476 u64 enq_flags)
1477 {
1478 bool is_local = dsq->id == SCX_DSQ_LOCAL;
1479
1480 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1481 WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
1482 !RB_EMPTY_NODE(&p->scx.dsq_priq));
1483
1484 if (!is_local) {
1485 raw_spin_lock_nested(&dsq->lock,
1486 (enq_flags & SCX_ENQ_NESTED) ? SINGLE_DEPTH_NESTING : 0);
1487
1488 if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
1489 scx_error(sch, "attempting to dispatch to a destroyed dsq");
1490 /* fall back to the global dsq */
1491 raw_spin_unlock(&dsq->lock);
1492 dsq = find_global_dsq(sch, task_cpu(p));
1493 raw_spin_lock(&dsq->lock);
1494 }
1495 }
1496
1497 if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
1498 (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
1499 /*
1500 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
1501 * their FIFO queues. To avoid confusion and accidentally
1502 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we
1503 * disallow any internal DSQ from doing vtime ordering of
1504 * tasks.
1505 */
1506 scx_error(sch, "cannot use vtime ordering for built-in DSQs");
1507 enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
1508 }
1509
1510 if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
1511 struct rb_node *rbp;
1512
1513 /*
1514 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
1515 * linked to both the rbtree and list on PRIQs, this can only be
1516 * tested easily when adding the first task.
1517 */
1518 if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
1519 nldsq_next_task(dsq, NULL, false)))
1520 scx_error(sch, "DSQ ID 0x%016llx already had FIFO-enqueued tasks",
1521 dsq->id);
1522
1523 p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
1524 rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
1525
1526 /*
1527 * Find the previous task and insert after it on the list so
1528 * that @dsq->list is vtime ordered.
1529 */
1530 rbp = rb_prev(&p->scx.dsq_priq);
1531 if (rbp) {
1532 struct task_struct *prev =
1533 container_of(rbp, struct task_struct,
1534 scx.dsq_priq);
1535 list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
1536 /* first task unchanged - no update needed */
1537 } else {
1538 list_add(&p->scx.dsq_list.node, &dsq->list);
1539 /* not builtin and new task is at head - use fastpath */
1540 rcu_assign_pointer(dsq->first_task, p);
1541 }
1542 } else {
1543 /* a FIFO DSQ shouldn't be using PRIQ enqueuing */
1544 if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
1545 scx_error(sch, "DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
1546 dsq->id);
1547
1548 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) {
1549 list_add(&p->scx.dsq_list.node, &dsq->list);
1550 /* new task inserted at head - use fastpath */
1551 if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN))
1552 rcu_assign_pointer(dsq->first_task, p);
1553 } else {
1554 /*
1555 * dsq->list can contain parked BPF iterator cursors, so
1556 * list_empty() here isn't a reliable proxy for "no real
1557 * task in the DSQ". Test dsq->first_task directly.
1558 */
1559 list_add_tail(&p->scx.dsq_list.node, &dsq->list);
1560 if (!dsq->first_task && !(dsq->id & SCX_DSQ_FLAG_BUILTIN))
1561 rcu_assign_pointer(dsq->first_task, p);
1562 }
1563 }
1564
1565 /* seq records the order tasks are queued, used by BPF DSQ iterator */
1566 WRITE_ONCE(dsq->seq, dsq->seq + 1);
1567 p->scx.dsq_seq = dsq->seq;
1568
1569 dsq_inc_nr(dsq, p, enq_flags);
1570 p->scx.dsq = dsq;
1571
1572 /*
1573 * Update custody and call ops.dequeue() before clearing ops_state:
1574 * once ops_state is cleared, waiters in ops_dequeue() can proceed
1575 * and dequeue_task_scx() will RMW p->scx.flags. If we clear
1576 * ops_state first, both sides would modify p->scx.flags
1577 * concurrently in a non-atomic way.
1578 */
1579 if (is_local) {
1580 local_dsq_post_enq(sch, dsq, p, enq_flags);
1581 } else {
1582 /*
1583 * Task on global/bypass DSQ: leave custody, task on
1584 * non-terminal DSQ: enter custody.
1585 */
1586 if (dsq->id == SCX_DSQ_GLOBAL || dsq->id == SCX_DSQ_BYPASS)
1587 call_task_dequeue(sch, rq, p, 0);
1588 else
1589 p->scx.flags |= SCX_TASK_IN_CUSTODY;
1590
1591 raw_spin_unlock(&dsq->lock);
1592 }
1593
1594 /*
1595 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
1596 * match waiters' load_acquire.
1597 */
1598 if (enq_flags & SCX_ENQ_CLEAR_OPSS)
1599 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1600 }
1601
1602 static void task_unlink_from_dsq(struct task_struct *p,
1603 struct scx_dispatch_q *dsq)
1604 {
1605 WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
1606
1607 if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
1608 rb_erase(&p->scx.dsq_priq, &dsq->priq);
1609 RB_CLEAR_NODE(&p->scx.dsq_priq);
1610 p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
1611 }
1612
1613 list_del_init(&p->scx.dsq_list.node);
1614 dsq_dec_nr(dsq, p);
1615
1616 if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN) && dsq->first_task == p) {
1617 struct task_struct *first_task;
1618
1619 first_task = nldsq_next_task(dsq, NULL, false);
1620 rcu_assign_pointer(dsq->first_task, first_task);
1621 }
1622 }
1623
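/*
 * Remove @p from whatever it is currently queued on - a DSQ, @rq's
 * ddsp_deferred_locals list, or an in-flight transfer marked by
 * @p->scx.holding_cpu. Must be called with @p's rq locked.
 */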
1624 static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
1625 {
1626 struct scx_dispatch_q *dsq = p->scx.dsq;
1627 bool is_local = dsq == &rq->scx.local_dsq;
1628
1629 lockdep_assert_rq_held(rq);
1630
1631 if (!dsq) {
1632 /*
1633 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
1634 * Unlinking is all that's needed to cancel.
1635 */
1636 if (unlikely(!list_empty(&p->scx.dsq_list.node)))
1637 list_del_init(&p->scx.dsq_list.node);
1638
1639 /*
1640 * When dispatching directly from the BPF scheduler to a local
1641 * DSQ, the task isn't associated with any DSQ but
1642 * @p->scx.holding_cpu may be set under the protection of
1643 * %SCX_OPSS_DISPATCHING.
1644 */
1645 if (p->scx.holding_cpu >= 0)
1646 p->scx.holding_cpu = -1;
1647
1648 return;
1649 }
1650
1651 if (!is_local)
1652 raw_spin_lock(&dsq->lock);
1653
1654 /*
1655 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
1656 * change underneath us.
1657 */
1658 if (p->scx.holding_cpu < 0) {
1659 /* @p must still be on @dsq, dequeue */
1660 task_unlink_from_dsq(p, dsq);
1661 } else {
1662 /*
1663 * We're racing against dispatch_to_local_dsq() which already
1664 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
1665 * holding_cpu which tells dispatch_to_local_dsq() that it lost
1666 * the race.
1667 */
1668 WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
1669 p->scx.holding_cpu = -1;
1670 }
1671 p->scx.dsq = NULL;
1672
1673 if (!is_local)
1674 raw_spin_unlock(&dsq->lock);
1675 }
1676
1677 /*
1678 * Abbreviated version of dispatch_dequeue() that can be used when both @p's rq
1679 * and dsq are locked.
1680 */
1681 static void dispatch_dequeue_locked(struct task_struct *p,
1682 struct scx_dispatch_q *dsq)
1683 {
1684 lockdep_assert_rq_held(task_rq(p));
1685 lockdep_assert_held(&dsq->lock);
1686
1687 task_unlink_from_dsq(p, dsq);
1688 p->scx.dsq = NULL;
1689 }
1690
1691 static struct scx_dispatch_q *find_dsq_for_dispatch(struct scx_sched *sch,
1692 struct rq *rq, u64 dsq_id,
1693 s32 tcpu)
1694 {
1695 struct scx_dispatch_q *dsq;
1696
1697 if (dsq_id == SCX_DSQ_LOCAL)
1698 return &rq->scx.local_dsq;
1699
1700 if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
1701 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
1702
1703 if (!ops_cpu_valid(sch, cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
1704 return find_global_dsq(sch, tcpu);
1705
1706 return &cpu_rq(cpu)->scx.local_dsq;
1707 }
1708
1709 if (dsq_id == SCX_DSQ_GLOBAL)
1710 dsq = find_global_dsq(sch, tcpu);
1711 else
1712 dsq = find_user_dsq(sch, dsq_id);
1713
1714 if (unlikely(!dsq)) {
1715 scx_error(sch, "non-existent DSQ 0x%llx", dsq_id);
1716 return find_global_dsq(sch, tcpu);
1717 }
1718
1719 return dsq;
1720 }
1721
1722 static void mark_direct_dispatch(struct scx_sched *sch,
1723 struct task_struct *ddsp_task,
1724 struct task_struct *p, u64 dsq_id,
1725 u64 enq_flags)
1726 {
1727 /*
1728 * Mark that dispatch already happened from ops.select_cpu() or
1729 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
1730 * which can never match a valid task pointer.
1731 */
1732 __this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));
1733
1734 /* @p must match the task on the enqueue path */
1735 if (unlikely(p != ddsp_task)) {
1736 if (IS_ERR(ddsp_task))
1737 scx_error(sch, "%s[%d] already direct-dispatched",
1738 p->comm, p->pid);
1739 else
1740 scx_error(sch, "scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
1741 ddsp_task->comm, ddsp_task->pid,
1742 p->comm, p->pid);
1743 return;
1744 }
1745
1746 WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
1747 WARN_ON_ONCE(p->scx.ddsp_enq_flags);
1748
1749 p->scx.ddsp_dsq_id = dsq_id;
1750 p->scx.ddsp_enq_flags = enq_flags;
1751 }
1752
1753 /*
1754 * Clear @p direct dispatch state when leaving the scheduler.
1755 *
1756 * Direct dispatch state must be cleared in the following cases:
1757 * - direct_dispatch(): cleared on the synchronous enqueue path, deferred
1758 * dispatch keeps the state until consumed
1759  * - process_ddsp_deferred_locals(): cleared after consuming deferred state
1760 * - do_enqueue_task(): cleared on enqueue fallbacks where the dispatch
1761 * verdict is ignored (local/global/bypass)
1762 * - dequeue_task_scx(): cleared after dispatch_dequeue(), covering deferred
1763 * cancellation and holding_cpu races
1764 * - scx_disable_task(): cleared for queued wakeup tasks, which are excluded by
1765 * the scx_bypass() loop, so that stale state is not reused by a subsequent
1766 * scheduler instance
1767 */
1768 static inline void clear_direct_dispatch(struct task_struct *p)
1769 {
1770 p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
1771 p->scx.ddsp_enq_flags = 0;
1772 }
1773
1774 static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
1775 u64 enq_flags)
1776 {
1777 struct rq *rq = task_rq(p);
1778 struct scx_dispatch_q *dsq =
1779 find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, task_cpu(p));
1780 u64 ddsp_enq_flags;
1781
1782 touch_core_sched_dispatch(rq, p);
1783
1784 p->scx.ddsp_enq_flags |= enq_flags;
1785
1786 /*
1787 * We are in the enqueue path with @rq locked and pinned, and thus can't
1788 * double lock a remote rq and enqueue to its local DSQ. For
1789 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
1790 * the enqueue so that it's executed when @rq can be unlocked.
1791 */
1792 if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
1793 unsigned long opss;
1794
1795 opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
1796
1797 switch (opss & SCX_OPSS_STATE_MASK) {
1798 case SCX_OPSS_NONE:
1799 break;
1800 case SCX_OPSS_QUEUEING:
1801 /*
1802 * As @p was never passed to the BPF side, _release is
1803 * not strictly necessary. Still do it for consistency.
1804 */
1805 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1806 break;
1807 default:
1808 WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()",
1809 p->comm, p->pid, opss);
1810 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1811 break;
1812 }
1813
1814 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1815 list_add_tail(&p->scx.dsq_list.node,
1816 &rq->scx.ddsp_deferred_locals);
1817 schedule_deferred_locked(rq);
1818 return;
1819 }
1820
1821 ddsp_enq_flags = p->scx.ddsp_enq_flags;
1822 clear_direct_dispatch(p);
1823
1824 dispatch_enqueue(sch, rq, dsq, p, ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
1825 }
1826
1827 static bool scx_rq_online(struct rq *rq)
1828 {
1829 /*
1830 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates
1831 * the online state as seen from the BPF scheduler. cpu_active() test
1832 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will
1833 * stay set until the current scheduling operation is complete even if
1834 * we aren't locking @rq.
1835 */
1836 return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
1837 }
1838
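/*
 * Queue @p for execution. Depending on the situation, @p is placed on a
 * fallback DSQ (local, global or bypass), direct-dispatched to the DSQ
 * chosen from ops.select_cpu()/ops.enqueue(), or handed to ops.enqueue()
 * for the BPF scheduler to place.
 */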
1839 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
1840 int sticky_cpu)
1841 {
1842 struct scx_sched *sch = scx_task_sched(p);
1843 struct task_struct **ddsp_taskp;
1844 struct scx_dispatch_q *dsq;
1845 unsigned long qseq;
1846
1847 WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
1848
1849 /* internal movements - rq migration / RESTORE */
1850 if (sticky_cpu == cpu_of(rq))
1851 goto local_norefill;
1852
1853 /*
1854 	 * Clear persistent %SCX_TASK_IMMED for fresh enqueues, see dsq_inc_nr().
1855 * Note that exiting and migration-disabled tasks that skip
1856 * ops.enqueue() below will lose IMMED protection unless
1857 * %SCX_OPS_ENQ_EXITING / %SCX_OPS_ENQ_MIGRATION_DISABLED are set.
1858 */
1859 p->scx.flags &= ~SCX_TASK_IMMED;
1860
1861 /*
1862 * If !scx_rq_online(), we already told the BPF scheduler that the CPU
1863 * is offline and are just running the hotplug path. Don't bother the
1864 * BPF scheduler.
1865 */
1866 if (!scx_rq_online(rq))
1867 goto local;
1868
1869 if (scx_bypassing(sch, cpu_of(rq))) {
1870 __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);
1871 goto bypass;
1872 }
1873
1874 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
1875 goto direct;
1876
1877 /* see %SCX_OPS_ENQ_EXITING */
1878 if (!(sch->ops.flags & SCX_OPS_ENQ_EXITING) &&
1879 unlikely(p->flags & PF_EXITING)) {
1880 __scx_add_event(sch, SCX_EV_ENQ_SKIP_EXITING, 1);
1881 goto local;
1882 }
1883
1884 /* see %SCX_OPS_ENQ_MIGRATION_DISABLED */
1885 if (!(sch->ops.flags & SCX_OPS_ENQ_MIGRATION_DISABLED) &&
1886 is_migration_disabled(p)) {
1887 __scx_add_event(sch, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED, 1);
1888 goto local;
1889 }
1890
1891 if (unlikely(!SCX_HAS_OP(sch, enqueue)))
1892 goto global;
1893
1894 /* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
1895 qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
1896
1897 WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
1898 atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);
1899
1900 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
1901 WARN_ON_ONCE(*ddsp_taskp);
1902 *ddsp_taskp = p;
1903
1904 SCX_CALL_OP_TASK(sch, enqueue, rq, p, enq_flags);
1905
1906 *ddsp_taskp = NULL;
1907 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
1908 goto direct;
1909
1910 /*
1911 * Task is now in BPF scheduler's custody. Set %SCX_TASK_IN_CUSTODY
1912 * so ops.dequeue() is called when it leaves custody.
1913 */
1914 p->scx.flags |= SCX_TASK_IN_CUSTODY;
1915
1916 /*
1917 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
1918 * dequeue may be waiting. The store_release matches their load_acquire.
1919 */
1920 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
1921 return;
1922
1923 direct:
1924 direct_dispatch(sch, p, enq_flags);
1925 return;
1926 local_norefill:
1927 dispatch_enqueue(sch, rq, &rq->scx.local_dsq, p, enq_flags);
1928 return;
1929 local:
1930 dsq = &rq->scx.local_dsq;
1931 goto enqueue;
1932 global:
1933 dsq = find_global_dsq(sch, task_cpu(p));
1934 goto enqueue;
1935 bypass:
1936 dsq = bypass_enq_target_dsq(sch, task_cpu(p));
1937 goto enqueue;
1938
1939 enqueue:
1940 /*
1941 * For task-ordering, slice refill must be treated as implying the end
1942 * of the current slice. Otherwise, the longer @p stays on the CPU, the
1943 * higher priority it becomes from scx_prio_less()'s POV.
1944 */
1945 touch_core_sched(rq, p);
1946 refill_task_slice_dfl(sch, p);
1947 clear_direct_dispatch(p);
1948 dispatch_enqueue(sch, rq, dsq, p, enq_flags);
1949 }
1950
1951 static bool task_runnable(const struct task_struct *p)
1952 {
1953 return !list_empty(&p->scx.runnable_node);
1954 }
1955
1956 static void set_task_runnable(struct rq *rq, struct task_struct *p)
1957 {
1958 lockdep_assert_rq_held(rq);
1959
1960 if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
1961 p->scx.runnable_at = jiffies;
1962 p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
1963 }
1964
1965 /*
1966 * list_add_tail() must be used. scx_bypass() depends on tasks being
1967 * appended to the runnable_list.
1968 */
1969 list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
1970 }
1971
1972 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
1973 {
1974 list_del_init(&p->scx.runnable_node);
1975 if (reset_runnable_at)
1976 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
1977 }
1978
1979 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int core_enq_flags)
1980 {
1981 struct scx_sched *sch = scx_task_sched(p);
1982 int sticky_cpu = p->scx.sticky_cpu;
1983 u64 enq_flags = core_enq_flags | rq->scx.extra_enq_flags;
1984
1985 if (enq_flags & ENQUEUE_WAKEUP)
1986 rq->scx.flags |= SCX_RQ_IN_WAKEUP;
1987
1988 /*
1989 * Restoring a running task will be immediately followed by
1990 * set_next_task_scx() which expects the task to not be on the BPF
1991 * scheduler as tasks can only start running through local DSQs. Force
1992 * direct-dispatch into the local DSQ by setting the sticky_cpu.
1993 */
1994 if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
1995 sticky_cpu = cpu_of(rq);
1996
1997 if (p->scx.flags & SCX_TASK_QUEUED) {
1998 WARN_ON_ONCE(!task_runnable(p));
1999 goto out;
2000 }
2001
2002 set_task_runnable(rq, p);
2003 p->scx.flags |= SCX_TASK_QUEUED;
2004 rq->scx.nr_running++;
2005 add_nr_running(rq, 1);
2006
2007 if (SCX_HAS_OP(sch, runnable) && !task_on_rq_migrating(p))
2008 SCX_CALL_OP_TASK(sch, runnable, rq, p, enq_flags);
2009
2010 if (enq_flags & SCX_ENQ_WAKEUP)
2011 touch_core_sched(rq, p);
2012
2013 /* Start dl_server if this is the first task being enqueued */
2014 if (rq->scx.nr_running == 1)
2015 dl_server_start(&rq->ext_server);
2016
2017 do_enqueue_task(rq, p, enq_flags, sticky_cpu);
2018
2019 if (sticky_cpu >= 0)
2020 p->scx.sticky_cpu = -1;
2021 out:
2022 rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
2023
2024 if ((enq_flags & SCX_ENQ_CPU_SELECTED) &&
2025 unlikely(cpu_of(rq) != p->scx.selected_cpu))
2026 __scx_add_event(sch, SCX_EV_SELECT_CPU_FALLBACK, 1);
2027 }
2028
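/*
 * @p is being dequeued. Reconcile @p->scx.ops_state - claim the task back
 * from the BPF scheduler if QUEUED or wait out an in-flight DISPATCHING -
 * and then notify ops.dequeue() through call_task_dequeue().
 */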
2029 static void ops_dequeue(struct rq *rq, struct task_struct *p, u64 deq_flags)
2030 {
2031 struct scx_sched *sch = scx_task_sched(p);
2032 unsigned long opss;
2033
2034 /* dequeue is always temporary, don't reset runnable_at */
2035 clr_task_runnable(p, false);
2036
2037 /* acquire ensures that we see the preceding updates on QUEUED */
2038 opss = atomic_long_read_acquire(&p->scx.ops_state);
2039
2040 switch (opss & SCX_OPSS_STATE_MASK) {
2041 case SCX_OPSS_NONE:
2042 break;
2043 case SCX_OPSS_QUEUEING:
2044 /*
2045 * QUEUEING is started and finished while holding @p's rq lock.
2046 * As we're holding the rq lock now, we shouldn't see QUEUEING.
2047 */
2048 BUG();
2049 case SCX_OPSS_QUEUED:
2050 /* A queued task must always be in BPF scheduler's custody */
2051 WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_IN_CUSTODY));
2052 if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2053 SCX_OPSS_NONE))
2054 break;
2055 fallthrough;
2056 case SCX_OPSS_DISPATCHING:
2057 /*
2058 * If @p is being dispatched from the BPF scheduler to a DSQ,
2059 * wait for the transfer to complete so that @p doesn't get
2060 * added to its DSQ after dequeueing is complete.
2061 *
2062 * As we're waiting on DISPATCHING with the rq locked, the
2063 * dispatching side shouldn't try to lock the rq while
2064 * DISPATCHING is set. See dispatch_to_local_dsq().
2065 *
2066 * DISPATCHING shouldn't have qseq set and control can reach
2067 * here with NONE @opss from the above QUEUED case block.
2068 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
2069 */
2070 wait_ops_state(p, SCX_OPSS_DISPATCHING);
2071 BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
2072 break;
2073 }
2074
2075 /*
2076 * Call ops.dequeue() if the task is still in BPF custody.
2077 *
2078 * The code that clears ops_state to %SCX_OPSS_NONE does not always
2079 * clear %SCX_TASK_IN_CUSTODY: in dispatch_to_local_dsq(), when
2080 * we're moving a task that was in %SCX_OPSS_DISPATCHING to a
2081 * remote CPU's local DSQ, we only set ops_state to %SCX_OPSS_NONE
2082 * so that a concurrent dequeue can proceed, but we clear
2083 * %SCX_TASK_IN_CUSTODY only when we later enqueue or move the
2084 * task. So we can see NONE + IN_CUSTODY here and we must handle
2085 * it. Similarly, after waiting on %SCX_OPSS_DISPATCHING we see
2086 * NONE but the task may still have %SCX_TASK_IN_CUSTODY set until
2087 * it is enqueued on the destination.
2088 */
2089 call_task_dequeue(sch, rq, p, deq_flags);
2090 }
2091
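/*
 * sched_class->dequeue_task() for SCX. Notifies ops.stopping() /
 * ops.quiescent() as appropriate and takes @p off whatever DSQ it is on.
 */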
2092 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int core_deq_flags)
2093 {
2094 struct scx_sched *sch = scx_task_sched(p);
2095 u64 deq_flags = core_deq_flags;
2096
2097 /*
2098 * Set %SCX_DEQ_SCHED_CHANGE when the dequeue is due to a property
2099 * change (not sleep or core-sched pick).
2100 */
2101 if (!(deq_flags & (DEQUEUE_SLEEP | SCX_DEQ_CORE_SCHED_EXEC)))
2102 deq_flags |= SCX_DEQ_SCHED_CHANGE;
2103
2104 if (!(p->scx.flags & SCX_TASK_QUEUED)) {
2105 WARN_ON_ONCE(task_runnable(p));
2106 return true;
2107 }
2108
2109 ops_dequeue(rq, p, deq_flags);
2110
2111 /*
2112 * A currently running task which is going off @rq first gets dequeued
2113 * and then stops running. As we want running <-> stopping transitions
2114 * to be contained within runnable <-> quiescent transitions, trigger
2115 * ->stopping() early here instead of in put_prev_task_scx().
2116 *
2117 * @p may go through multiple stopping <-> running transitions between
2118 * here and put_prev_task_scx() if task attribute changes occur while
2119 * balance_one() leaves @rq unlocked. However, they don't contain any
2120 * information meaningful to the BPF scheduler and can be suppressed by
2121 * skipping the callbacks if the task is !QUEUED.
2122 */
2123 if (SCX_HAS_OP(sch, stopping) && task_current(rq, p)) {
2124 update_curr_scx(rq);
2125 SCX_CALL_OP_TASK(sch, stopping, rq, p, false);
2126 }
2127
2128 if (SCX_HAS_OP(sch, quiescent) && !task_on_rq_migrating(p))
2129 SCX_CALL_OP_TASK(sch, quiescent, rq, p, deq_flags);
2130
2131 if (deq_flags & SCX_DEQ_SLEEP)
2132 p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
2133 else
2134 p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
2135
2136 p->scx.flags &= ~SCX_TASK_QUEUED;
2137 rq->scx.nr_running--;
2138 sub_nr_running(rq, 1);
2139
2140 dispatch_dequeue(rq, p);
2141 clear_direct_dispatch(p);
2142 return true;
2143 }
2144
2145 static void yield_task_scx(struct rq *rq)
2146 {
2147 struct task_struct *p = rq->donor;
2148 struct scx_sched *sch = scx_task_sched(p);
2149
2150 if (SCX_HAS_OP(sch, yield))
2151 SCX_CALL_OP_2TASKS_RET(sch, yield, rq, p, NULL);
2152 else
2153 p->scx.slice = 0;
2154 }
2155
2156 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
2157 {
2158 struct task_struct *from = rq->donor;
2159 struct scx_sched *sch = scx_task_sched(from);
2160
2161 if (SCX_HAS_OP(sch, yield) && sch == scx_task_sched(to))
2162 return SCX_CALL_OP_2TASKS_RET(sch, yield, rq, from, to);
2163 else
2164 return false;
2165 }
2166
2167 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags)
2168 {
2169 /*
2170 * Preemption between SCX tasks is implemented by resetting the victim
2171 * task's slice to 0 and triggering reschedule on the target CPU.
2172 * Nothing to do.
2173 */
2174 if (p->sched_class == &ext_sched_class)
2175 return;
2176
2177 /*
2178 * Getting preempted by a higher-priority class. Reenqueue IMMED tasks.
2179 * This captures all preemption cases including:
2180 *
2181 * - A SCX task is currently running.
2182 *
2183 * - @rq is waking from idle due to a SCX task waking to it.
2184 *
2185 	 * - A higher-priority task wakes up while SCX dispatch is in progress.
2186 */
2187 if (rq->scx.nr_immed)
2188 schedule_reenq_local(rq, 0);
2189 }
2190
2191 static void move_local_task_to_local_dsq(struct scx_sched *sch,
2192 struct task_struct *p, u64 enq_flags,
2193 struct scx_dispatch_q *src_dsq,
2194 struct rq *dst_rq)
2195 {
2196 struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
2197
2198 /* @dsq is locked and @p is on @dst_rq */
2199 lockdep_assert_held(&src_dsq->lock);
2200 lockdep_assert_rq_held(dst_rq);
2201
2202 WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2203
2204 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
2205 list_add(&p->scx.dsq_list.node, &dst_dsq->list);
2206 else
2207 list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
2208
2209 dsq_inc_nr(dst_dsq, p, enq_flags);
2210 p->scx.dsq = dst_dsq;
2211
2212 local_dsq_post_enq(sch, dst_dsq, p, enq_flags);
2213 }
2214
2215 /**
2216 * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
2217 * @p: task to move
2218 * @enq_flags: %SCX_ENQ_*
2219 * @src_rq: rq to move the task from, locked on entry, released on return
2220 * @dst_rq: rq to move the task into, locked on return
2221 *
2222 * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
2223 */
2224 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
2225 struct rq *src_rq, struct rq *dst_rq)
2226 {
2227 lockdep_assert_rq_held(src_rq);
2228
2229 /*
2230 * Set sticky_cpu before deactivate_task() to properly mark the
2231 * beginning of an SCX-internal migration.
2232 */
2233 p->scx.sticky_cpu = cpu_of(dst_rq);
2234 deactivate_task(src_rq, p, 0);
2235 set_task_cpu(p, cpu_of(dst_rq));
2236
2237 raw_spin_rq_unlock(src_rq);
2238 raw_spin_rq_lock(dst_rq);
2239
2240 /*
2241 * We want to pass scx-specific enq_flags but activate_task() will
2242 * truncate the upper 32 bit. As we own @rq, we can pass them through
2243 * @rq->scx.extra_enq_flags instead.
2244 */
2245 WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
2246 WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
2247 dst_rq->scx.extra_enq_flags = enq_flags;
2248 activate_task(dst_rq, p, 0);
2249 dst_rq->scx.extra_enq_flags = 0;
2250 }
2251
2252 /*
2253 * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
2254 * differences:
2255 *
2256 * - is_cpu_allowed() asks "Can this task run on this CPU?" while
2257 * task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
2258 * this CPU?".
2259 *
2260 * While migration is disabled, is_cpu_allowed() has to say "yes" as the task
2261 * must be allowed to finish on the CPU that it's currently on regardless of
2262 * the CPU state. However, task_can_run_on_remote_rq() must say "no" as the
2263 * BPF scheduler shouldn't attempt to migrate a task which has migration
2264 * disabled.
2265 *
2266 * - The BPF scheduler is bypassed while the rq is offline and we can always say
2267  *   no to BPF-scheduler-initiated migrations while offline.
2268 *
2269 * The caller must ensure that @p and @rq are on different CPUs.
2270 */
2271 static bool task_can_run_on_remote_rq(struct scx_sched *sch,
2272 struct task_struct *p, struct rq *rq,
2273 bool enforce)
2274 {
2275 s32 cpu = cpu_of(rq);
2276
2277 WARN_ON_ONCE(task_cpu(p) == cpu);
2278
2279 /*
2280 * If @p has migration disabled, @p->cpus_ptr is updated to contain only
2281 * the pinned CPU in migrate_disable_switch() while @p is being switched
2282 * out. However, put_prev_task_scx() is called before @p->cpus_ptr is
2283 	 * updated and thus another CPU may see @p on a DSQ in between, leading to
2284 * @p passing the below task_allowed_on_cpu() check while migration is
2285 * disabled.
2286 *
2287 * Test the migration disabled state first as the race window is narrow
2288 * and the BPF scheduler failing to check migration disabled state can
2289 * easily be masked if task_allowed_on_cpu() is done first.
2290 */
2291 if (unlikely(is_migration_disabled(p))) {
2292 if (enforce)
2293 scx_error(sch, "SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d",
2294 p->comm, p->pid, task_cpu(p), cpu);
2295 return false;
2296 }
2297
2298 /*
2299 * We don't require the BPF scheduler to avoid dispatching to offline
2300 * CPUs mostly for convenience but also because CPUs can go offline
2301 * between scx_bpf_dsq_insert() calls and here. Trigger error iff the
2302 * picked CPU is outside the allowed mask.
2303 */
2304 if (!task_allowed_on_cpu(p, cpu)) {
2305 if (enforce)
2306 scx_error(sch, "SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]",
2307 cpu, p->comm, p->pid);
2308 return false;
2309 }
2310
2311 if (!scx_rq_online(rq)) {
2312 if (enforce)
2313 __scx_add_event(sch, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE, 1);
2314 return false;
2315 }
2316
2317 return true;
2318 }
2319
2320 /**
2321 * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
2322 * @p: target task
2323 * @dsq: locked DSQ @p is currently on
2324 * @src_rq: rq @p is currently on, stable with @dsq locked
2325 *
2326 * Called with @dsq locked but no rq's locked. We want to move @p to a different
2327 * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is
2328 * required when transferring into a local DSQ. Even when transferring into a
2329 * non-local DSQ, it's better to use the same mechanism to protect against
2330 * dequeues and maintain the invariant that @p->scx.dsq can only change while
2331 * @src_rq is locked, which e.g. scx_dump_task() depends on.
2332 *
2333 * We want to grab @src_rq but that can deadlock if we try while locking @dsq,
2334 * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As
2335 * this may race with dequeue, which can't drop the rq lock or fail, do a little
2336 * dancing from our side.
2337 *
2338 * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
2339 * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu
2340 * would be cleared to -1. While other cpus may have updated it to different
2341 * values afterwards, as this operation can't be preempted or recurse, the
2342 * holding_cpu can never become this CPU again before we're done. Thus, we can
2343 * tell whether we lost to dequeue by testing whether the holding_cpu still
2344 * points to this CPU. See dispatch_dequeue() for the counterpart.
2345 *
2346 * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is
2347 * still valid. %false if lost to dequeue.
2348 */
2349 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p,
2350 struct scx_dispatch_q *dsq,
2351 struct rq *src_rq)
2352 {
2353 s32 cpu = raw_smp_processor_id();
2354
2355 lockdep_assert_held(&dsq->lock);
2356
2357 WARN_ON_ONCE(p->scx.holding_cpu >= 0);
2358 task_unlink_from_dsq(p, dsq);
2359 p->scx.holding_cpu = cpu;
2360
2361 raw_spin_unlock(&dsq->lock);
2362 raw_spin_rq_lock(src_rq);
2363
2364 /* task_rq couldn't have changed if we're still the holding cpu */
2365 return likely(p->scx.holding_cpu == cpu) &&
2366 !WARN_ON_ONCE(src_rq != task_rq(p));
2367 }
2368
2369 static bool consume_remote_task(struct rq *this_rq,
2370 struct task_struct *p, u64 enq_flags,
2371 struct scx_dispatch_q *dsq, struct rq *src_rq)
2372 {
2373 raw_spin_rq_unlock(this_rq);
2374
2375 if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
2376 move_remote_task_to_local_dsq(p, enq_flags, src_rq, this_rq);
2377 return true;
2378 } else {
2379 raw_spin_rq_unlock(src_rq);
2380 raw_spin_rq_lock(this_rq);
2381 return false;
2382 }
2383 }
2384
2385 /**
2386 * move_task_between_dsqs() - Move a task from one DSQ to another
2387 * @sch: scx_sched being operated on
2388 * @p: target task
2389 * @enq_flags: %SCX_ENQ_*
2390 * @src_dsq: DSQ @p is currently on, must not be a local DSQ
2391 * @dst_dsq: DSQ @p is being moved to, can be any DSQ
2392 *
2393 * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local
2394 * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq
2395 * will change. As @p's task_rq is locked, this function doesn't need to use the
2396 * holding_cpu mechanism.
2397 *
2398 * On return, @src_dsq is unlocked and only @p's new task_rq, which is the
2399 * return value, is locked.
2400 */
2401 static struct rq *move_task_between_dsqs(struct scx_sched *sch,
2402 struct task_struct *p, u64 enq_flags,
2403 struct scx_dispatch_q *src_dsq,
2404 struct scx_dispatch_q *dst_dsq)
2405 {
2406 struct rq *src_rq = task_rq(p), *dst_rq;
2407
2408 BUG_ON(src_dsq->id == SCX_DSQ_LOCAL);
2409 lockdep_assert_held(&src_dsq->lock);
2410 lockdep_assert_rq_held(src_rq);
2411
2412 if (dst_dsq->id == SCX_DSQ_LOCAL) {
2413 dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2414 if (src_rq != dst_rq &&
2415 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
2416 dst_dsq = find_global_dsq(sch, task_cpu(p));
2417 dst_rq = src_rq;
2418 enq_flags |= SCX_ENQ_GDSQ_FALLBACK;
2419 }
2420 } else {
2421 /* no need to migrate if destination is a non-local DSQ */
2422 dst_rq = src_rq;
2423 }
2424
2425 /*
2426 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
2427 * CPU, @p will be migrated.
2428 */
2429 if (dst_dsq->id == SCX_DSQ_LOCAL) {
2430 /* @p is going from a non-local DSQ to a local DSQ */
2431 if (src_rq == dst_rq) {
2432 task_unlink_from_dsq(p, src_dsq);
2433 move_local_task_to_local_dsq(sch, p, enq_flags,
2434 src_dsq, dst_rq);
2435 raw_spin_unlock(&src_dsq->lock);
2436 } else {
2437 raw_spin_unlock(&src_dsq->lock);
2438 move_remote_task_to_local_dsq(p, enq_flags,
2439 src_rq, dst_rq);
2440 }
2441 } else {
2442 /*
2443 * @p is going from a non-local DSQ to a non-local DSQ. As
2444 * $src_dsq is already locked, do an abbreviated dequeue.
2445 */
2446 dispatch_dequeue_locked(p, src_dsq);
2447 raw_spin_unlock(&src_dsq->lock);
2448
2449 dispatch_enqueue(sch, dst_rq, dst_dsq, p, enq_flags);
2450 }
2451
2452 return dst_rq;
2453 }
2454
2455 static bool consume_dispatch_q(struct scx_sched *sch, struct rq *rq,
2456 struct scx_dispatch_q *dsq, u64 enq_flags)
2457 {
2458 struct task_struct *p;
2459 retry:
2460 /*
2461 * The caller can't expect to successfully consume a task if the task's
2462 * addition to @dsq isn't guaranteed to be visible somehow. Test
2463 * @dsq->list without locking and skip if it seems empty.
2464 */
2465 if (list_empty(&dsq->list))
2466 return false;
2467
2468 raw_spin_lock(&dsq->lock);
2469
2470 nldsq_for_each_task(p, dsq) {
2471 struct rq *task_rq = task_rq(p);
2472
2473 /*
2474 * This loop can lead to multiple lockup scenarios, e.g. the BPF
2475 * scheduler can put an enormous number of affinitized tasks into
2476 * a contended DSQ, or the outer retry loop can repeatedly race
2477 * against scx_bypass() dequeueing tasks from @dsq trying to put
2478 * the system into the bypass mode. This can easily live-lock the
2479 * machine. If aborting, exit from all non-bypass DSQs.
2480 */
2481 if (unlikely(READ_ONCE(sch->aborting)) && dsq->id != SCX_DSQ_BYPASS)
2482 break;
2483
2484 if (rq == task_rq) {
2485 task_unlink_from_dsq(p, dsq);
2486 move_local_task_to_local_dsq(sch, p, enq_flags, dsq, rq);
2487 raw_spin_unlock(&dsq->lock);
2488 return true;
2489 }
2490
2491 if (task_can_run_on_remote_rq(sch, p, rq, false)) {
2492 if (likely(consume_remote_task(rq, p, enq_flags, dsq, task_rq)))
2493 return true;
2494 goto retry;
2495 }
2496 }
2497
2498 raw_spin_unlock(&dsq->lock);
2499 return false;
2500 }
2501
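/* consume a task from the global DSQ of @rq's NUMA node */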
2502 static bool consume_global_dsq(struct scx_sched *sch, struct rq *rq)
2503 {
2504 int node = cpu_to_node(cpu_of(rq));
2505
2506 return consume_dispatch_q(sch, rq, &sch->pnode[node]->global_dsq, 0);
2507 }
2508
2509 /**
2510 * dispatch_to_local_dsq - Dispatch a task to a local dsq
2511 * @sch: scx_sched being operated on
2512 * @rq: current rq which is locked
2513 * @dst_dsq: destination DSQ
2514 * @p: task to dispatch
2515 * @enq_flags: %SCX_ENQ_*
2516 *
2517 * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
2518 * DSQ. This function performs all the synchronization dancing needed because
2519 * local DSQs are protected with rq locks.
2520 *
2521 * The caller must have exclusive ownership of @p (e.g. through
2522 * %SCX_OPSS_DISPATCHING).
2523 */
2524 static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
2525 struct scx_dispatch_q *dst_dsq,
2526 struct task_struct *p, u64 enq_flags)
2527 {
2528 struct rq *src_rq = task_rq(p);
2529 struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
2530 struct rq *locked_rq = rq;
2531
2532 /*
2533 * We're synchronized against dequeue through DISPATCHING. As @p can't
2534 * be dequeued, its task_rq and cpus_allowed are stable too.
2535 *
2536 * If dispatching to @rq that @p is already on, no lock dancing needed.
2537 */
2538 if (rq == src_rq && rq == dst_rq) {
2539 dispatch_enqueue(sch, rq, dst_dsq, p,
2540 enq_flags | SCX_ENQ_CLEAR_OPSS);
2541 return;
2542 }
2543
2544 if (src_rq != dst_rq &&
2545 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
2546 dispatch_enqueue(sch, rq, find_global_dsq(sch, task_cpu(p)), p,
2547 enq_flags | SCX_ENQ_CLEAR_OPSS | SCX_ENQ_GDSQ_FALLBACK);
2548 return;
2549 }
2550
2551 /*
2552 * @p is on a possibly remote @src_rq which we need to lock to move the
2553 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
2554 * on DISPATCHING, so we can't grab @src_rq lock while holding
2555 * DISPATCHING.
2556 *
2557 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
2558 * we're moving from a DSQ and use the same mechanism - mark the task
2559 * under transfer with holding_cpu, release DISPATCHING and then follow
2560 * the same protocol. See unlink_dsq_and_lock_src_rq().
2561 */
2562 p->scx.holding_cpu = raw_smp_processor_id();
2563
2564 /* store_release ensures that dequeue sees the above */
2565 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2566
2567 /* switch to @src_rq lock */
2568 if (locked_rq != src_rq) {
2569 raw_spin_rq_unlock(locked_rq);
2570 locked_rq = src_rq;
2571 raw_spin_rq_lock(src_rq);
2572 }
2573
2574 /* task_rq couldn't have changed if we're still the holding cpu */
2575 if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
2576 !WARN_ON_ONCE(src_rq != task_rq(p))) {
2577 /*
2578 * If @p is staying on the same rq, there's no need to go
2579 * through the full deactivate/activate cycle. Optimize by
2580 * abbreviating move_remote_task_to_local_dsq().
2581 */
2582 if (src_rq == dst_rq) {
2583 p->scx.holding_cpu = -1;
2584 dispatch_enqueue(sch, dst_rq, &dst_rq->scx.local_dsq, p,
2585 enq_flags);
2586 } else {
2587 move_remote_task_to_local_dsq(p, enq_flags,
2588 src_rq, dst_rq);
2589 /* task has been moved to dst_rq, which is now locked */
2590 locked_rq = dst_rq;
2591 }
2592
2593 /* if the destination CPU is idle, wake it up */
2594 if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
2595 resched_curr(dst_rq);
2596 }
2597
2598 /* switch back to @rq lock */
2599 if (locked_rq != rq) {
2600 raw_spin_rq_unlock(locked_rq);
2601 raw_spin_rq_lock(rq);
2602 }
2603 }
2604
2605 /**
2606 * finish_dispatch - Asynchronously finish dispatching a task
2607 * @rq: current rq which is locked
2608 * @p: task to finish dispatching
2609 * @qseq_at_dispatch: qseq when @p started getting dispatched
2610 * @dsq_id: destination DSQ ID
2611 * @enq_flags: %SCX_ENQ_*
2612 *
2613 * Dispatching to local DSQs may need to wait for queueing to complete or
2614  * require rq lock dancing. As we don't want to do either while inside
2615 * ops.dispatch() to avoid locking order inversion, we split dispatching into
2616 * two parts. scx_bpf_dsq_insert() which is called by ops.dispatch() records the
2617 * task and its qseq. Once ops.dispatch() returns, this function is called to
2618 * finish up.
2619 *
2620 * There is no guarantee that @p is still valid for dispatching or even that it
2621 * was valid in the first place. Make sure that the task is still owned by the
2622 * BPF scheduler and claim the ownership before dispatching.
2623 */
2624 static void finish_dispatch(struct scx_sched *sch, struct rq *rq,
2625 struct task_struct *p,
2626 unsigned long qseq_at_dispatch,
2627 u64 dsq_id, u64 enq_flags)
2628 {
2629 struct scx_dispatch_q *dsq;
2630 unsigned long opss;
2631
2632 touch_core_sched_dispatch(rq, p);
2633 retry:
2634 /*
2635 * No need for _acquire here. @p is accessed only after a successful
2636 * try_cmpxchg to DISPATCHING.
2637 */
2638 opss = atomic_long_read(&p->scx.ops_state);
2639
2640 switch (opss & SCX_OPSS_STATE_MASK) {
2641 case SCX_OPSS_DISPATCHING:
2642 case SCX_OPSS_NONE:
2643 /* someone else already got to it */
2644 return;
2645 case SCX_OPSS_QUEUED:
2646 /*
2647 * If qseq doesn't match, @p has gone through at least one
2648 * dispatch/dequeue and re-enqueue cycle between
2649 * scx_bpf_dsq_insert() and here and we have no claim on it.
2650 */
2651 if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
2652 return;
2653
2654 /* see SCX_EV_INSERT_NOT_OWNED definition */
2655 if (unlikely(!scx_task_on_sched(sch, p))) {
2656 __scx_add_event(sch, SCX_EV_INSERT_NOT_OWNED, 1);
2657 return;
2658 }
2659
2660 /*
2661 * While we know @p is accessible, we don't yet have a claim on
2662 * it - the BPF scheduler is allowed to dispatch tasks
2663 * spuriously and there can be a racing dequeue attempt. Let's
2664 * claim @p by atomically transitioning it from QUEUED to
2665 * DISPATCHING.
2666 */
2667 if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2668 SCX_OPSS_DISPATCHING)))
2669 break;
2670 goto retry;
2671 case SCX_OPSS_QUEUEING:
2672 /*
2673 * do_enqueue_task() is in the process of transferring the task
2674 * to the BPF scheduler while holding @p's rq lock. As we aren't
2675 * holding any kernel or BPF resource that the enqueue path may
2676 * depend upon, it's safe to wait.
2677 */
2678 wait_ops_state(p, opss);
2679 goto retry;
2680 }
2681
2682 BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
2683
2684 dsq = find_dsq_for_dispatch(sch, this_rq(), dsq_id, task_cpu(p));
2685
2686 if (dsq->id == SCX_DSQ_LOCAL)
2687 dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags);
2688 else
2689 dispatch_enqueue(sch, rq, dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2690 }
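
/*
 * For illustration only (not part of this file): on the BPF side, the
 * two-phase dispatch described above might be driven by a sketch like the
 * following. MY_DSQ_ID is an arbitrary user DSQ assumed to have been
 * created with scx_bpf_create_dsq(MY_DSQ_ID, -1) from ops.init(), and
 * BPF_STRUCT_OPS() is the helper macro used by the example schedulers:
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, MY_DSQ_ID, SCX_SLICE_DFL, enq_flags);
 *	}
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		scx_bpf_dsq_move_to_local(MY_DSQ_ID);
 *	}
 *
 * scx_bpf_dsq_insert() from ops.enqueue() takes the direct-dispatch path
 * (mark_direct_dispatch() + direct_dispatch()), while the same call from
 * ops.dispatch() is buffered and completed by finish_dispatch() above.
 */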
2691
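/*
 * Finish all dispatches buffered by scx_bpf_dsq_insert() during the
 * current ops.dispatch() invocation.
 */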
2692 static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq)
2693 {
2694 struct scx_dsp_ctx *dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
2695 u32 u;
2696
2697 for (u = 0; u < dspc->cursor; u++) {
2698 struct scx_dsp_buf_ent *ent = &dspc->buf[u];
2699
2700 finish_dispatch(sch, rq, ent->task, ent->qseq, ent->dsq_id,
2701 ent->enq_flags);
2702 }
2703
2704 dspc->nr_tasks += dspc->cursor;
2705 dspc->cursor = 0;
2706 }
2707
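/*
 * Queue the deferred balance callback if %SCX_RQ_BAL_CB_PENDING is set.
 * Called with @rq locked after balancing.
 */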
2708 static inline void maybe_queue_balance_callback(struct rq *rq)
2709 {
2710 lockdep_assert_rq_held(rq);
2711
2712 if (!(rq->scx.flags & SCX_RQ_BAL_CB_PENDING))
2713 return;
2714
2715 queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
2716 deferred_bal_cb_workfn);
2717
2718 rq->scx.flags &= ~SCX_RQ_BAL_CB_PENDING;
2719 }
2720
2721 /*
2722 * One user of this function is scx_bpf_dispatch() which can be called
2723 * recursively as sub-sched dispatches nest. Always inline to reduce stack usage
2724 * from the call frame.
2725 */
2726 static __always_inline bool
2727 scx_dispatch_sched(struct scx_sched *sch, struct rq *rq,
2728 struct task_struct *prev, bool nested)
2729 {
2730 struct scx_dsp_ctx *dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
2731 int nr_loops = SCX_DSP_MAX_LOOPS;
2732 s32 cpu = cpu_of(rq);
2733 bool prev_on_sch = (prev->sched_class == &ext_sched_class) &&
2734 scx_task_on_sched(sch, prev);
2735
2736 if (consume_global_dsq(sch, rq))
2737 return true;
2738
2739 if (bypass_dsp_enabled(sch)) {
2740 /* if @sch is bypassing, only the bypass DSQs are active */
2741 if (scx_bypassing(sch, cpu))
2742 return consume_dispatch_q(sch, rq, bypass_dsq(sch, cpu), 0);
2743
2744 #ifdef CONFIG_EXT_SUB_SCHED
2745 /*
2746 * If @sch isn't bypassing but its children are, @sch is
2747 * responsible for making forward progress for both its own
2748 * tasks that aren't bypassing and the bypassing descendants'
2749 * tasks. The following implements a simple built-in behavior -
2750 * let each CPU try to run the bypass DSQ every Nth time.
2751 *
2752 * Later, if necessary, we can add an ops flag to suppress the
2753 		 * auto-consumption and a kfunc to consume the bypass DSQ, so that
2754 		 * the BPF scheduler can fully control scheduling of
2755 * bypassed tasks.
2756 */
2757 struct scx_sched_pcpu *pcpu = per_cpu_ptr(sch->pcpu, cpu);
2758
2759 if (!(pcpu->bypass_host_seq++ % SCX_BYPASS_HOST_NTH) &&
2760 consume_dispatch_q(sch, rq, bypass_dsq(sch, cpu), 0)) {
2761 __scx_add_event(sch, SCX_EV_SUB_BYPASS_DISPATCH, 1);
2762 return true;
2763 }
2764 #endif /* CONFIG_EXT_SUB_SCHED */
2765 }
2766
2767 if (unlikely(!SCX_HAS_OP(sch, dispatch)) || !scx_rq_online(rq))
2768 return false;
2769
2770 dspc->rq = rq;
2771
2772 /*
2773 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock,
2774 * the local DSQ might still end up empty after a successful
2775 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch()
2776 * produced some tasks, retry. The BPF scheduler may depend on this
2777 * looping behavior to simplify its implementation.
2778 */
2779 do {
2780 dspc->nr_tasks = 0;
2781
2782 if (nested) {
2783 SCX_CALL_OP(sch, dispatch, rq, cpu, prev_on_sch ? prev : NULL);
2784 } else {
2785 /* stash @prev so that nested invocations can access it */
2786 rq->scx.sub_dispatch_prev = prev;
2787 SCX_CALL_OP(sch, dispatch, rq, cpu, prev_on_sch ? prev : NULL);
2788 rq->scx.sub_dispatch_prev = NULL;
2789 }
2790
2791 flush_dispatch_buf(sch, rq);
2792
2793 if ((prev->scx.flags & SCX_TASK_QUEUED) && prev->scx.slice) {
2794 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2795 return true;
2796 }
2797 if (rq->scx.local_dsq.nr)
2798 return true;
2799 if (consume_global_dsq(sch, rq))
2800 return true;
2801
2802 /*
2803 * ops.dispatch() can trap us in this loop by repeatedly
2804 * dispatching ineligible tasks. Break out once in a while to
2805 * allow the watchdog to run. As IRQ can't be enabled in
2806 * balance(), we want to complete this scheduling cycle and then
2807 * start a new one. IOW, we want to call resched_curr() on the
2808 * next, most likely idle, task, not the current one. Use
2809 * __scx_bpf_kick_cpu() for deferred kicking.
2810 */
2811 if (unlikely(!--nr_loops)) {
2812 scx_kick_cpu(sch, cpu, 0);
2813 break;
2814 }
2815 } while (dspc->nr_tasks);
2816
2817 /*
2818 * Prevent the CPU from going idle while bypassed descendants have tasks
2819 * queued. Without this fallback, bypassed tasks could stall if the host
2820 * scheduler's ops.dispatch() doesn't yield any tasks.
2821 */
2822 if (bypass_dsp_enabled(sch))
2823 return consume_dispatch_q(sch, rq, bypass_dsq(sch, cpu), 0);
2824
2825 return false;
2826 }
2827
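/*
 * Ensure @rq has something for pick_task_scx() to run. Returns %true if
 * there is at least one runnable task or @prev should keep running (in
 * which case %SCX_RQ_BAL_KEEP is set), %false otherwise.
 */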
2828 static int balance_one(struct rq *rq, struct task_struct *prev)
2829 {
2830 struct scx_sched *sch = scx_root;
2831 s32 cpu = cpu_of(rq);
2832
2833 lockdep_assert_rq_held(rq);
2834 rq->scx.flags |= SCX_RQ_IN_BALANCE;
2835 rq->scx.flags &= ~SCX_RQ_BAL_KEEP;
2836
2837 if ((sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT) &&
2838 unlikely(rq->scx.cpu_released)) {
2839 /*
2840 * If the previous sched_class for the current CPU was not SCX,
2841 * notify the BPF scheduler that it again has control of the
2842 * core. This callback complements ->cpu_release(), which is
2843 * emitted in switch_class().
2844 */
2845 if (SCX_HAS_OP(sch, cpu_acquire))
2846 SCX_CALL_OP(sch, cpu_acquire, rq, cpu, NULL);
2847 rq->scx.cpu_released = false;
2848 }
2849
2850 if (prev->sched_class == &ext_sched_class) {
2851 update_curr_scx(rq);
2852
2853 /*
2854 * If @prev is runnable & has slice left, it has priority and
2855 * fetching more just increases latency for the fetched tasks.
2856 * Tell pick_task_scx() to keep running @prev. If the BPF
2857 * scheduler wants to handle this explicitly, it should
2858 * implement ->cpu_release().
2859 *
2860 * See scx_disable_workfn() for the explanation on the bypassing
2861 * test.
2862 */
2863 if ((prev->scx.flags & SCX_TASK_QUEUED) && prev->scx.slice &&
2864 !scx_bypassing(sch, cpu)) {
2865 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2866 goto has_tasks;
2867 }
2868 }
2869
2870 /* if there already are tasks to run, nothing to do */
2871 if (rq->scx.local_dsq.nr)
2872 goto has_tasks;
2873
2874 if (scx_dispatch_sched(sch, rq, prev, false))
2875 goto has_tasks;
2876
2877 /*
2878 * Didn't find another task to run. Keep running @prev unless
2879 * %SCX_OPS_ENQ_LAST is in effect.
2880 */
2881 if ((prev->scx.flags & SCX_TASK_QUEUED) &&
2882 (!(sch->ops.flags & SCX_OPS_ENQ_LAST) || scx_bypassing(sch, cpu))) {
2883 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2884 __scx_add_event(sch, SCX_EV_DISPATCH_KEEP_LAST, 1);
2885 goto has_tasks;
2886 }
2887 rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2888 return false;
2889
2890 has_tasks:
2891 /*
2892 * @rq may have extra IMMED tasks without reenq scheduled:
2893 *
2894 * - rq_is_open() can't reliably tell when and how slice is going to be
2895 * modified for $curr and allows IMMED tasks to be queued while
2896 * dispatch is in progress.
2897 *
2898 * - A non-IMMED HEAD task can get queued in front of an IMMED task
2899 * between the IMMED queueing and the subsequent scheduling event.
2900 */
2901 if (unlikely(rq->scx.local_dsq.nr > 1 && rq->scx.nr_immed))
2902 schedule_reenq_local(rq, 0);
2903
2904 rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2905 return true;
2906 }
2907
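/*
 * @p is about to start running on @rq. If core-sched picked @p before it
 * was dispatched, take it off the BPF scheduler and its DSQ first, then
 * notify ops.running() and refresh the tick-stop state.
 */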
2908 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
2909 {
2910 struct scx_sched *sch = scx_task_sched(p);
2911
2912 if (p->scx.flags & SCX_TASK_QUEUED) {
2913 /*
2914 * Core-sched might decide to execute @p before it is
2915 * dispatched. Call ops_dequeue() to notify the BPF scheduler.
2916 */
2917 ops_dequeue(rq, p, SCX_DEQ_CORE_SCHED_EXEC);
2918 dispatch_dequeue(rq, p);
2919 }
2920
2921 p->se.exec_start = rq_clock_task(rq);
2922
2923 /* see dequeue_task_scx() on why we skip when !QUEUED */
2924 if (SCX_HAS_OP(sch, running) && (p->scx.flags & SCX_TASK_QUEUED))
2925 SCX_CALL_OP_TASK(sch, running, rq, p);
2926
2927 clr_task_runnable(p, true);
2928
2929 /*
2930 * @p is getting newly scheduled or got kicked after someone updated its
2931 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
2932 */
2933 if ((p->scx.slice == SCX_SLICE_INF) !=
2934 (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
2935 if (p->scx.slice == SCX_SLICE_INF)
2936 rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
2937 else
2938 rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
2939
2940 sched_update_tick_dependency(rq);
2941
2942 /*
2943 * For now, let's refresh the load_avgs just when transitioning
2944 * in and out of nohz. In the future, we might want to add a
2945 * mechanism which calls the following periodically on
2946 * tick-stopped CPUs.
2947 */
2948 update_other_load_avgs(rq);
2949 }
2950 }
2951
2952 static enum scx_cpu_preempt_reason
2953 preempt_reason_from_class(const struct sched_class *class)
2954 {
2955 if (class == &stop_sched_class)
2956 return SCX_CPU_PREEMPT_STOP;
2957 if (class == &dl_sched_class)
2958 return SCX_CPU_PREEMPT_DL;
2959 if (class == &rt_sched_class)
2960 return SCX_CPU_PREEMPT_RT;
2961 return SCX_CPU_PREEMPT_UNKNOWN;
2962 }
2963
2964 static void switch_class(struct rq *rq, struct task_struct *next)
2965 {
2966 struct scx_sched *sch = scx_root;
2967 const struct sched_class *next_class = next->sched_class;
2968
2969 if (!(sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT))
2970 return;
2971
2972 /*
2973 * The callback is conceptually meant to convey that the CPU is no
2974 * longer under the control of SCX. Therefore, don't invoke the callback
2975 * if the next class is below SCX (in which case the BPF scheduler has
2976 * actively decided not to schedule any tasks on the CPU).
2977 */
2978 if (sched_class_above(&ext_sched_class, next_class))
2979 return;
2980
2981 /*
2982 * At this point we know that SCX was preempted by a higher priority
2983 * sched_class, so invoke the ->cpu_release() callback if we have not
2984 * done so already. We only send the callback once between SCX being
2985 * preempted, and it regaining control of the CPU.
2986 *
2987 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
2988 * next time that balance_one() is invoked.
2989 */
2990 if (!rq->scx.cpu_released) {
2991 if (SCX_HAS_OP(sch, cpu_release)) {
2992 struct scx_cpu_release_args args = {
2993 .reason = preempt_reason_from_class(next_class),
2994 .task = next,
2995 };
2996
2997 SCX_CALL_OP(sch, cpu_release, rq, cpu_of(rq), &args);
2998 }
2999 rq->scx.cpu_released = true;
3000 }
3001 }
3002
3003 static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
3004 struct task_struct *next)
3005 {
3006 struct scx_sched *sch = scx_task_sched(p);
3007
3008 /* see kick_sync_wait_bal_cb() */
3009 smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1);
3010
3011 update_curr_scx(rq);
3012
3013 /* see dequeue_task_scx() on why we skip when !QUEUED */
3014 if (SCX_HAS_OP(sch, stopping) && (p->scx.flags & SCX_TASK_QUEUED))
3015 SCX_CALL_OP_TASK(sch, stopping, rq, p, true);
3016
3017 if (p->scx.flags & SCX_TASK_QUEUED) {
3018 set_task_runnable(rq, p);
3019
3020 /*
3021 * If @p has slice left and is being put, @p is getting
3022 * preempted by a higher priority scheduler class or core-sched
3023 * forcing a different task. Leave it at the head of the local
3024 * DSQ unless it was an IMMED task. IMMED tasks should not
3025 		 * linger on a busy CPU; reenqueue them to the BPF scheduler.
3026 */
3027 if (p->scx.slice && !scx_bypassing(sch, cpu_of(rq))) {
3028 if (p->scx.flags & SCX_TASK_IMMED) {
3029 p->scx.flags |= SCX_TASK_REENQ_PREEMPTED;
3030 do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
3031 p->scx.flags &= ~SCX_TASK_REENQ_REASON_MASK;
3032 } else {
3033 dispatch_enqueue(sch, rq, &rq->scx.local_dsq, p, SCX_ENQ_HEAD);
3034 }
3035 goto switch_class;
3036 }
3037
3038 /*
3039 * If @p is runnable but we're about to enter a lower
3040 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell
3041 * ops.enqueue() that @p is the only one available for this cpu,
3042 * which should trigger an explicit follow-up scheduling event.
3043 */
3044 if (next && sched_class_above(&ext_sched_class, next->sched_class)) {
3045 WARN_ON_ONCE(!(sch->ops.flags & SCX_OPS_ENQ_LAST));
3046 do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
3047 } else {
3048 do_enqueue_task(rq, p, 0, -1);
3049 }
3050 }
3051
3052 switch_class:
3053 if (next && next->sched_class != &ext_sched_class)
3054 switch_class(rq, next);
3055 }
3056
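/*
 * Balance callback which waits until every CPU in @rq->scx.cpus_to_sync
 * has advanced its kick_sync counter, i.e. has passed through
 * put_prev_task_scx() or do_pick_task_scx() since the values in the
 * per-CPU scx_kick_syncs snapshot were recorded.
 */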
3057 static void kick_sync_wait_bal_cb(struct rq *rq)
3058 {
3059 struct scx_kick_syncs __rcu *ks = __this_cpu_read(scx_kick_syncs);
3060 unsigned long *ksyncs = rcu_dereference_sched(ks)->syncs;
3061 bool waited;
3062 s32 cpu;
3063
3064 /*
3065 * Drop rq lock and enable IRQs while waiting. IRQs must be enabled
3066 * — a target CPU may be waiting for us to process an IPI (e.g. TLB
3067 	 * as a target CPU may be waiting for us to process an IPI (e.g. TLB
3068 *
3069 * Also, keep advancing our own kick_sync so that new kick_sync waits
3070 * targeting us, which can start after we drop the lock, cannot form
3071 * cyclic dependencies.
3072 */
3073 retry:
3074 waited = false;
3075 for_each_cpu(cpu, rq->scx.cpus_to_sync) {
3076 /*
3077 * smp_load_acquire() pairs with smp_store_release() on
3078 * kick_sync updates on the target CPUs.
3079 */
3080 if (cpu == cpu_of(rq) ||
3081 smp_load_acquire(&cpu_rq(cpu)->scx.kick_sync) != ksyncs[cpu]) {
3082 cpumask_clear_cpu(cpu, rq->scx.cpus_to_sync);
3083 continue;
3084 }
3085
3086 raw_spin_rq_unlock_irq(rq);
3087 while (READ_ONCE(cpu_rq(cpu)->scx.kick_sync) == ksyncs[cpu]) {
3088 smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1);
3089 cpu_relax();
3090 }
3091 raw_spin_rq_lock_irq(rq);
3092 waited = true;
3093 }
3094
3095 if (waited)
3096 goto retry;
3097 }
3098
3099 static struct task_struct *first_local_task(struct rq *rq)
3100 {
3101 return list_first_entry_or_null(&rq->scx.local_dsq.list,
3102 struct task_struct, scx.dsq_list.node);
3103 }
3104
3105 static struct task_struct *
3106 do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx)
3107 {
3108 struct task_struct *prev = rq->curr;
3109 bool keep_prev;
3110 struct task_struct *p;
3111
3112 /* see kick_sync_wait_bal_cb() */
3113 smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1);
3114
3115 rq_modified_begin(rq, &ext_sched_class);
3116
3117 rq_unpin_lock(rq, rf);
3118 balance_one(rq, prev);
3119 rq_repin_lock(rq, rf);
3120 maybe_queue_balance_callback(rq);
3121
3122 /*
3123 * Defer to a balance callback which can drop rq lock and enable
3124 * IRQs. Waiting directly in the pick path would deadlock against
3125 * CPUs sending us IPIs (e.g. TLB flushes) while we wait for them.
3126 */
3127 if (unlikely(rq->scx.kick_sync_pending)) {
3128 rq->scx.kick_sync_pending = false;
3129 queue_balance_callback(rq, &rq->scx.kick_sync_bal_cb,
3130 kick_sync_wait_bal_cb);
3131 }
3132
3133 /*
3134 * If any higher-priority sched class enqueued a runnable task on
3135 * this rq during balance_one(), abort and return RETRY_TASK, so
3136 * that the scheduler loop can restart.
3137 *
3138 * If @force_scx is true, always try to pick a SCHED_EXT task,
3139 * regardless of any higher-priority sched classes activity.
3140 */
3141 if (!force_scx && rq_modified_above(rq, &ext_sched_class))
3142 return RETRY_TASK;
3143
3144 keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
3145 if (unlikely(keep_prev &&
3146 prev->sched_class != &ext_sched_class)) {
3147 WARN_ON_ONCE(scx_enable_state() == SCX_ENABLED);
3148 keep_prev = false;
3149 }
3150
3151 /*
3152 * If balance_one() is telling us to keep running @prev, replenish slice
3153 * if necessary and keep running @prev. Otherwise, pop the first one
3154 * from the local DSQ.
3155 */
3156 if (keep_prev) {
3157 p = prev;
3158 if (!p->scx.slice)
3159 refill_task_slice_dfl(scx_task_sched(p), p);
3160 } else {
3161 p = first_local_task(rq);
3162 if (!p)
3163 return NULL;
3164
3165 if (unlikely(!p->scx.slice)) {
3166 struct scx_sched *sch = scx_task_sched(p);
3167
3168 if (!scx_bypassing(sch, cpu_of(rq)) &&
3169 !sch->warned_zero_slice) {
3170 printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n",
3171 p->comm, p->pid, __func__);
3172 sch->warned_zero_slice = true;
3173 }
3174 refill_task_slice_dfl(sch, p);
3175 }
3176 }
3177
3178 return p;
3179 }
3180
3181 static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf)
3182 {
3183 return do_pick_task_scx(rq, rf, false);
3184 }
3185
3186 /*
3187 * Select the next task to run from the ext scheduling class.
3188 *
3189 * Use do_pick_task_scx() directly with @force_scx enabled, since the
3190 * dl_server must always select a sched_ext task.
3191 */
3192 static struct task_struct *
3193 ext_server_pick_task(struct sched_dl_entity *dl_se, struct rq_flags *rf)
3194 {
3195 if (!scx_enabled())
3196 return NULL;
3197
3198 return do_pick_task_scx(dl_se->rq, rf, true);
3199 }
3200
3201 /*
3202 * Initialize the ext server deadline entity.
3203 */
3204 void ext_server_init(struct rq *rq)
3205 {
3206 struct sched_dl_entity *dl_se = &rq->ext_server;
3207
3208 init_dl_entity(dl_se);
3209
3210 dl_server_init(dl_se, rq, ext_server_pick_task);
3211 }
3212
3213 #ifdef CONFIG_SCHED_CORE
3214 /**
3215 * scx_prio_less - Task ordering for core-sched
3216 * @a: task A
3217 * @b: task B
3218 * @in_fi: in forced idle state
3219 *
3220 * Core-sched is implemented as an additional scheduling layer on top of the
3221 * usual sched_class'es and needs to find out the expected task ordering. For
3222 * SCX, core-sched calls this function to interrogate the task ordering.
3223 *
3224 * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
3225 * to implement the default task ordering. The older the timestamp, the higher
3226 * priority the task - the global FIFO ordering matching the default scheduling
3227 * behavior.
3228 *
3229 * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
3230 * implement FIFO ordering within each local DSQ. See pick_task_scx().
3231 */
3232 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
3233 bool in_fi)
3234 {
3235 struct scx_sched *sch_a = scx_task_sched(a);
3236 struct scx_sched *sch_b = scx_task_sched(b);
3237
3238 /*
3239 * The const qualifiers are dropped from task_struct pointers when
3240 * calling ops.core_sched_before(). Accesses are controlled by the
3241 * verifier.
3242 */
3243 if (sch_a == sch_b && SCX_HAS_OP(sch_a, core_sched_before) &&
3244 !scx_bypassing(sch_a, task_cpu(a)))
3245 return SCX_CALL_OP_2TASKS_RET(sch_a, core_sched_before,
3246 task_rq(a),
3247 (struct task_struct *)a,
3248 (struct task_struct *)b);
3249 else
3250 return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
3251 }
3252 #endif /* CONFIG_SCHED_CORE */
3253
3254 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
3255 {
3256 struct scx_sched *sch = scx_task_sched(p);
3257 bool bypassing;
3258
3259 /*
3260 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it
3261 * can be a good migration opportunity with low cache and memory
3262 * footprint. Returning a CPU different than @prev_cpu triggers
3263 * immediate rq migration. However, for SCX, as the current rq
3264 * association doesn't dictate where the task is going to run, this
3265 * doesn't fit well. If necessary, we can later add a dedicated method
3266 * which can decide to preempt self to force it through the regular
3267 * scheduling path.
3268 */
3269 if (unlikely(wake_flags & WF_EXEC))
3270 return prev_cpu;
3271
3272 bypassing = scx_bypassing(sch, task_cpu(p));
3273 if (likely(SCX_HAS_OP(sch, select_cpu)) && !bypassing) {
3274 s32 cpu;
3275 struct task_struct **ddsp_taskp;
3276
3277 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
3278 WARN_ON_ONCE(*ddsp_taskp);
3279 *ddsp_taskp = p;
3280
3281 this_rq()->scx.in_select_cpu = true;
3282 cpu = SCX_CALL_OP_TASK_RET(sch, select_cpu, NULL, p, prev_cpu, wake_flags);
3283 this_rq()->scx.in_select_cpu = false;
3284 p->scx.selected_cpu = cpu;
3285 *ddsp_taskp = NULL;
3286 if (ops_cpu_valid(sch, cpu, "from ops.select_cpu()"))
3287 return cpu;
3288 else
3289 return prev_cpu;
3290 } else {
3291 s32 cpu;
3292
3293 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0);
3294 if (cpu >= 0) {
3295 refill_task_slice_dfl(sch, p);
3296 p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
3297 } else {
3298 cpu = prev_cpu;
3299 }
3300 p->scx.selected_cpu = cpu;
3301
3302 if (bypassing)
3303 __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);
3304 return cpu;
3305 }
3306 }
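/*
 * For orientation only: a BPF scheduler's ops.select_cpu() typically mirrors
 * the default path above. The sketch below is illustrative and not taken from
 * any in-tree scheduler; it assumes the scx_bpf_select_cpu_dfl() and
 * scx_bpf_dsq_insert() kfuncs and the BPF_STRUCT_OPS() helper macro from the
 * sched_ext BPF tooling:
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle = false;
 *		s32 cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *
 *		if (is_idle)
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}
 */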
3307
3308 static void task_woken_scx(struct rq *rq, struct task_struct *p)
3309 {
3310 run_deferred(rq);
3311 }
3312
3313 static void set_cpus_allowed_scx(struct task_struct *p,
3314 struct affinity_context *ac)
3315 {
3316 struct scx_sched *sch = scx_task_sched(p);
3317
3318 set_cpus_allowed_common(p, ac);
3319
3320 if (task_dead_and_done(p))
3321 return;
3322
3323 /*
3324 * The effective cpumask is stored in @p->cpus_ptr which may temporarily
3325 * differ from the configured one in @p->cpus_mask. Always tell the bpf
3326 * scheduler the effective one.
3327 *
3328 * Fine-grained memory write control is enforced by BPF making the const
3329 * designation pointless. Cast it away when calling the operation.
3330 */
3331 if (SCX_HAS_OP(sch, set_cpumask))
3332 SCX_CALL_OP_TASK(sch, set_cpumask, task_rq(p), p, (struct cpumask *)p->cpus_ptr);
3333 }
3334
3335 static void handle_hotplug(struct rq *rq, bool online)
3336 {
3337 struct scx_sched *sch = scx_root;
3338 s32 cpu = cpu_of(rq);
3339
3340 atomic_long_inc(&scx_hotplug_seq);
3341
3342 /*
3343 * scx_root updates are protected by cpus_read_lock() and will stay
3344 * stable here. Note that we can't depend on scx_enabled() test as the
3345 * hotplug ops need to be enabled before __scx_enabled is set.
3346 */
3347 if (unlikely(!sch))
3348 return;
3349
3350 if (scx_enabled())
3351 scx_idle_update_selcpu_topology(&sch->ops);
3352
3353 if (online && SCX_HAS_OP(sch, cpu_online))
3354 SCX_CALL_OP(sch, cpu_online, NULL, cpu);
3355 else if (!online && SCX_HAS_OP(sch, cpu_offline))
3356 SCX_CALL_OP(sch, cpu_offline, NULL, cpu);
3357 else
3358 scx_exit(sch, SCX_EXIT_UNREG_KERN,
3359 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
3360 "cpu %d going %s, exiting scheduler", cpu,
3361 online ? "online" : "offline");
3362 }
3363
3364 void scx_rq_activate(struct rq *rq)
3365 {
3366 handle_hotplug(rq, true);
3367 }
3368
3369 void scx_rq_deactivate(struct rq *rq)
3370 {
3371 handle_hotplug(rq, false);
3372 }
3373
3374 static void rq_online_scx(struct rq *rq)
3375 {
3376 rq->scx.flags |= SCX_RQ_ONLINE;
3377 }
3378
3379 static void rq_offline_scx(struct rq *rq)
3380 {
3381 rq->scx.flags &= ~SCX_RQ_ONLINE;
3382 }
3383
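/*
 * Scan @rq's runnable tasks and abort the owning scheduler of any task that
 * has been runnable for longer than that scheduler's watchdog timeout.
 * Returns %true if a timeout was detected.
 */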
3384 static bool check_rq_for_timeouts(struct rq *rq)
3385 {
3386 struct scx_sched *sch;
3387 struct task_struct *p;
3388 struct rq_flags rf;
3389 bool timed_out = false;
3390
3391 rq_lock_irqsave(rq, &rf);
3392 sch = rcu_dereference_bh(scx_root);
3393 if (unlikely(!sch))
3394 goto out_unlock;
3395
3396 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
3397 struct scx_sched *sch = scx_task_sched(p);
3398 unsigned long last_runnable = p->scx.runnable_at;
3399
3400 if (unlikely(time_after(jiffies,
3401 last_runnable + READ_ONCE(sch->watchdog_timeout)))) {
3402 u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
3403
3404 scx_exit(sch, SCX_EXIT_ERROR_STALL, 0,
3405 "%s[%d] failed to run for %u.%03us",
3406 p->comm, p->pid, dur_ms / 1000, dur_ms % 1000);
3407 timed_out = true;
3408 break;
3409 }
3410 }
3411 out_unlock:
3412 rq_unlock_irqrestore(rq, &rf);
3413 return timed_out;
3414 }
3415
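/*
 * Watchdog work: record the check-in time, scan every online CPU for stalled
 * runnable tasks and, while the interval is armed (less than ULONG_MAX),
 * re-queue itself after scx_watchdog_interval.
 */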
3416 static void scx_watchdog_workfn(struct work_struct *work)
3417 {
3418 unsigned long intv;
3419 int cpu;
3420
3421 WRITE_ONCE(scx_watchdog_timestamp, jiffies);
3422
3423 for_each_online_cpu(cpu) {
3424 if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
3425 break;
3426
3427 cond_resched();
3428 }
3429
3430 intv = READ_ONCE(scx_watchdog_interval);
3431 if (intv < ULONG_MAX)
3432 queue_delayed_work(system_dfl_wq, to_delayed_work(work), intv);
3433 }
3434
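/*
 * Called from the scheduler tick. Aborts the root scheduler if the watchdog
 * work itself failed to check in within the watchdog timeout, and updates the
 * other load averages.
 */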
3435 void scx_tick(struct rq *rq)
3436 {
3437 struct scx_sched *root;
3438 unsigned long last_check;
3439
3440 if (!scx_enabled())
3441 return;
3442
3443 root = rcu_dereference_bh(scx_root);
3444 if (unlikely(!root))
3445 return;
3446
3447 last_check = READ_ONCE(scx_watchdog_timestamp);
3448 if (unlikely(time_after(jiffies,
3449 last_check + READ_ONCE(root->watchdog_timeout)))) {
3450 u32 dur_ms = jiffies_to_msecs(jiffies - last_check);
3451
3452 scx_exit(root, SCX_EXIT_ERROR_STALL, 0,
3453 "watchdog failed to check in for %u.%03us",
3454 dur_ms / 1000, dur_ms % 1000);
3455 }
3456
3457 update_other_load_avgs(rq);
3458 }
3459
3460 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
3461 {
3462 struct scx_sched *sch = scx_task_sched(curr);
3463
3464 update_curr_scx(rq);
3465
3466 /*
3467 * While disabling, always resched and refresh core-sched timestamp as
3468 * we can't trust the slice management or ops.core_sched_before().
3469 */
3470 if (scx_bypassing(sch, cpu_of(rq))) {
3471 curr->scx.slice = 0;
3472 touch_core_sched(rq, curr);
3473 } else if (SCX_HAS_OP(sch, tick)) {
3474 SCX_CALL_OP_TASK(sch, tick, rq, curr);
3475 }
3476
3477 if (!curr->scx.slice)
3478 resched_curr(rq);
3479 }
3480
3481 #ifdef CONFIG_EXT_GROUP_SCHED
3482 static struct cgroup *tg_cgrp(struct task_group *tg)
3483 {
3484 /*
3485 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup,
3486 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the
3487 * root cgroup.
3488 */
3489 if (tg && tg->css.cgroup)
3490 return tg->css.cgroup;
3491 else
3492 return &cgrp_dfl_root.cgrp;
3493 }
3494
3495 #define SCX_INIT_TASK_ARGS_CGROUP(tg) .cgroup = tg_cgrp(tg),
3496
3497 #else /* CONFIG_EXT_GROUP_SCHED */
3498
3499 #define SCX_INIT_TASK_ARGS_CGROUP(tg)
3500
3501 #endif /* CONFIG_EXT_GROUP_SCHED */
3502
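/*
 * Per-task enable state is stored in the SCX_TASK_STATE_MASK bits of
 * p->scx.flags. A task walks NONE -> INIT -> READY -> ENABLED as it is
 * initialized and enabled, and back down as it is disabled and exited.
 * scx_set_task_state() warns on invalid transitions.
 */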
3503 static u32 scx_get_task_state(const struct task_struct *p)
3504 {
3505 return p->scx.flags & SCX_TASK_STATE_MASK;
3506 }
3507
3508 static void scx_set_task_state(struct task_struct *p, u32 state)
3509 {
3510 u32 prev_state = scx_get_task_state(p);
3511 bool warn = false;
3512
3513 switch (state) {
3514 case SCX_TASK_NONE:
3515 break;
3516 case SCX_TASK_INIT:
3517 warn = prev_state != SCX_TASK_NONE;
3518 break;
3519 case SCX_TASK_READY:
3520 warn = prev_state == SCX_TASK_NONE;
3521 break;
3522 case SCX_TASK_ENABLED:
3523 warn = prev_state != SCX_TASK_READY;
3524 break;
3525 default:
3526 WARN_ONCE(1, "sched_ext: Invalid task state %d -> %d for %s[%d]",
3527 prev_state, state, p->comm, p->pid);
3528 return;
3529 }
3530
3531 WARN_ONCE(warn, "sched_ext: Invalid task state transition 0x%x -> 0x%x for %s[%d]",
3532 prev_state, state, p->comm, p->pid);
3533
3534 p->scx.flags &= ~SCX_TASK_STATE_MASK;
3535 p->scx.flags |= state;
3536 }
3537
3538 static int __scx_init_task(struct scx_sched *sch, struct task_struct *p, bool fork)
3539 {
3540 int ret;
3541
3542 p->scx.disallow = false;
3543
3544 if (SCX_HAS_OP(sch, init_task)) {
3545 struct scx_init_task_args args = {
3546 SCX_INIT_TASK_ARGS_CGROUP(task_group(p))
3547 .fork = fork,
3548 };
3549
3550 ret = SCX_CALL_OP_RET(sch, init_task, NULL, p, &args);
3551 if (unlikely(ret)) {
3552 ret = ops_sanitize_err(sch, "init_task", ret);
3553 return ret;
3554 }
3555 }
3556
3557 if (p->scx.disallow) {
3558 if (unlikely(scx_parent(sch))) {
3559 scx_error(sch, "non-root ops.init_task() set task->scx.disallow for %s[%d]",
3560 p->comm, p->pid);
3561 } else if (unlikely(fork)) {
3562 scx_error(sch, "ops.init_task() set task->scx.disallow for %s[%d] during fork",
3563 p->comm, p->pid);
3564 } else {
3565 struct rq *rq;
3566 struct rq_flags rf;
3567
3568 rq = task_rq_lock(p, &rf);
3569
3570 /*
3571 * We're in the load path and @p->policy will be applied
3572 * right after. Reverting @p->policy here and rejecting
3573 * %SCHED_EXT transitions from scx_check_setscheduler()
3574 * guarantees that if ops.init_task() sets @p->disallow,
3575 * @p can never be in SCX.
3576 */
3577 if (p->policy == SCHED_EXT) {
3578 p->policy = SCHED_NORMAL;
3579 atomic_long_inc(&scx_nr_rejected);
3580 }
3581
3582 task_rq_unlock(rq, p, &rf);
3583 }
3584 }
3585
3586 return 0;
3587 }
3588
3589 static int scx_init_task(struct scx_sched *sch, struct task_struct *p, bool fork)
3590 {
3591 int ret;
3592
3593 ret = __scx_init_task(sch, p, fork);
3594 if (!ret) {
3595 /*
3596 		 * @p's rq is not locked here, but @p is not visible to the rest
3597 		 * of SCX yet, so it's safe to update the flags and state.
3598 */
3599 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
3600 scx_set_task_state(p, SCX_TASK_INIT);
3601 }
3602 return ret;
3603 }
3604
3605 static void __scx_enable_task(struct scx_sched *sch, struct task_struct *p)
3606 {
3607 struct rq *rq = task_rq(p);
3608 u32 weight;
3609
3610 lockdep_assert_rq_held(rq);
3611
3612 /*
3613 * Verify the task is not in BPF scheduler's custody. If flag
3614 * transitions are consistent, the flag should always be clear
3615 * here.
3616 */
3617 WARN_ON_ONCE(p->scx.flags & SCX_TASK_IN_CUSTODY);
3618
3619 /*
3620 * Set the weight before calling ops.enable() so that the scheduler
3621 * doesn't see a stale value if they inspect the task struct.
3622 */
3623 if (task_has_idle_policy(p))
3624 weight = WEIGHT_IDLEPRIO;
3625 else
3626 weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
3627
3628 p->scx.weight = sched_weight_to_cgroup(weight);
3629
3630 if (SCX_HAS_OP(sch, enable))
3631 SCX_CALL_OP_TASK(sch, enable, rq, p);
3632
3633 if (SCX_HAS_OP(sch, set_weight))
3634 SCX_CALL_OP_TASK(sch, set_weight, rq, p, p->scx.weight);
3635 }
3636
3637 static void scx_enable_task(struct scx_sched *sch, struct task_struct *p)
3638 {
3639 __scx_enable_task(sch, p);
3640 scx_set_task_state(p, SCX_TASK_ENABLED);
3641 }
3642
3643 static void scx_disable_task(struct scx_sched *sch, struct task_struct *p)
3644 {
3645 struct rq *rq = task_rq(p);
3646
3647 lockdep_assert_rq_held(rq);
3648 WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
3649
3650 clear_direct_dispatch(p);
3651
3652 if (SCX_HAS_OP(sch, disable))
3653 SCX_CALL_OP_TASK(sch, disable, rq, p);
3654 scx_set_task_state(p, SCX_TASK_READY);
3655
3656 /*
3657 * Verify the task is not in BPF scheduler's custody. If flag
3658 * transitions are consistent, the flag should always be clear
3659 * here.
3660 */
3661 WARN_ON_ONCE(p->scx.flags & SCX_TASK_IN_CUSTODY);
3662 }
3663
3664 static void __scx_disable_and_exit_task(struct scx_sched *sch,
3665 struct task_struct *p)
3666 {
3667 struct scx_exit_task_args args = {
3668 .cancelled = false,
3669 };
3670
3671 lockdep_assert_held(&p->pi_lock);
3672 lockdep_assert_rq_held(task_rq(p));
3673
3674 switch (scx_get_task_state(p)) {
3675 case SCX_TASK_NONE:
3676 return;
3677 case SCX_TASK_INIT:
3678 args.cancelled = true;
3679 break;
3680 case SCX_TASK_READY:
3681 break;
3682 case SCX_TASK_ENABLED:
3683 scx_disable_task(sch, p);
3684 break;
3685 default:
3686 WARN_ON_ONCE(true);
3687 return;
3688 }
3689
3690 if (SCX_HAS_OP(sch, exit_task))
3691 SCX_CALL_OP_TASK(sch, exit_task, task_rq(p), p, &args);
3692 }
3693
3694 /*
3695 * Undo a completed __scx_init_task(sch, p, false) when scx_enable_task() never
3696 * ran. The task state has not been transitioned, so this mirrors the
3697 * SCX_TASK_INIT branch in __scx_disable_and_exit_task().
3698 */
3699 static void scx_sub_init_cancel_task(struct scx_sched *sch, struct task_struct *p)
3700 {
3701 struct scx_exit_task_args args = { .cancelled = true };
3702
3703 lockdep_assert_held(&p->pi_lock);
3704 lockdep_assert_rq_held(task_rq(p));
3705
3706 if (SCX_HAS_OP(sch, exit_task))
3707 SCX_CALL_OP_TASK(sch, exit_task, task_rq(p), p, &args);
3708 }
3709
3710 static void scx_disable_and_exit_task(struct scx_sched *sch,
3711 struct task_struct *p)
3712 {
3713 __scx_disable_and_exit_task(sch, p);
3714
3715 /*
3716 * If set, @p exited between __scx_init_task() and scx_enable_task() in
3717 * scx_sub_enable() and is initialized for both the associated sched and
3718 * its parent. Exit for the child too - scx_enable_task() never ran for
3719 * it, so undo only init_task.
3720 */
3721 if (p->scx.flags & SCX_TASK_SUB_INIT) {
3722 if (!WARN_ON_ONCE(!scx_enabling_sub_sched))
3723 scx_sub_init_cancel_task(scx_enabling_sub_sched, p);
3724 p->scx.flags &= ~SCX_TASK_SUB_INIT;
3725 }
3726
3727 scx_set_task_sched(p, NULL);
3728 scx_set_task_state(p, SCX_TASK_NONE);
3729 }
3730
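/*
 * Reset @scx to its initial state for a fresh task: empty DSQ linkage, no
 * sticky or holding CPU, runnable_at set to now and the default slice.
 */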
3731 void init_scx_entity(struct sched_ext_entity *scx)
3732 {
3733 memset(scx, 0, sizeof(*scx));
3734 INIT_LIST_HEAD(&scx->dsq_list.node);
3735 RB_CLEAR_NODE(&scx->dsq_priq);
3736 scx->sticky_cpu = -1;
3737 scx->holding_cpu = -1;
3738 INIT_LIST_HEAD(&scx->runnable_node);
3739 scx->runnable_at = jiffies;
3740 scx->ddsp_dsq_id = SCX_DSQ_INVALID;
3741 scx->slice = SCX_SLICE_DFL;
3742 }
3743
3744 void scx_pre_fork(struct task_struct *p)
3745 {
3746 /*
3747 * BPF scheduler enable/disable paths want to be able to iterate and
3748 * update all tasks which can become complex when racing forks. As
3749 * enable/disable are very cold paths, let's use a percpu_rwsem to
3750 * exclude forks.
3751 */
3752 percpu_down_read(&scx_fork_rwsem);
3753 }
3754
3755 int scx_fork(struct task_struct *p, struct kernel_clone_args *kargs)
3756 {
3757 s32 ret;
3758
3759 percpu_rwsem_assert_held(&scx_fork_rwsem);
3760
3761 if (scx_init_task_enabled) {
3762 #ifdef CONFIG_EXT_SUB_SCHED
3763 struct scx_sched *sch = kargs->cset->dfl_cgrp->scx_sched;
3764 #else
3765 struct scx_sched *sch = scx_root;
3766 #endif
3767 ret = scx_init_task(sch, p, true);
3768 if (!ret)
3769 scx_set_task_sched(p, sch);
3770 return ret;
3771 }
3772
3773 return 0;
3774 }
3775
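/*
 * Fork-side counterpart of scx_pre_fork(). If init_task handling is enabled,
 * mark @p READY and enable it immediately when it is already on sched_ext.
 * Always add @p to scx_tasks and drop the fork rwsem taken in scx_pre_fork().
 */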
3776 void scx_post_fork(struct task_struct *p)
3777 {
3778 if (scx_init_task_enabled) {
3779 scx_set_task_state(p, SCX_TASK_READY);
3780
3781 /*
3782 * Enable the task immediately if it's running on sched_ext.
3783 * Otherwise, it'll be enabled in switching_to_scx() if and
3784 * when it's ever configured to run with a SCHED_EXT policy.
3785 */
3786 if (p->sched_class == &ext_sched_class) {
3787 struct rq_flags rf;
3788 struct rq *rq;
3789
3790 rq = task_rq_lock(p, &rf);
3791 scx_enable_task(scx_task_sched(p), p);
3792 task_rq_unlock(rq, p, &rf);
3793 }
3794 }
3795
3796 raw_spin_lock_irq(&scx_tasks_lock);
3797 list_add_tail(&p->scx.tasks_node, &scx_tasks);
3798 raw_spin_unlock_irq(&scx_tasks_lock);
3799
3800 percpu_up_read(&scx_fork_rwsem);
3801 }
3802
3803 void scx_cancel_fork(struct task_struct *p)
3804 {
3805 if (scx_enabled()) {
3806 struct rq *rq;
3807 struct rq_flags rf;
3808
3809 rq = task_rq_lock(p, &rf);
3810 WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
3811 scx_disable_and_exit_task(scx_task_sched(p), p);
3812 task_rq_unlock(rq, p, &rf);
3813 }
3814
3815 percpu_up_read(&scx_fork_rwsem);
3816 }
3817
3818 /**
3819 * task_dead_and_done - Is a task dead and done running?
3820 * @p: target task
3821 *
3822 * Once sched_ext_dead() removes the dead task from scx_tasks and exits it, the
3823 * task no longer exists from SCX's POV. However, certain sched_class ops may be
3824 * invoked on these dead tasks leading to failures - e.g. sched_setscheduler()
3825 * may try to switch a task which finished sched_ext_dead() back into SCX
3826 * triggering invalid SCX task state transitions and worse.
3827 *
3828 * Once a task has finished the final switch, sched_ext_dead() is the only thing
3829 * that needs to happen on the task. Use this test to short-circuit sched_class
3830 * operations which may be called on dead tasks.
3831 */
3832 static bool task_dead_and_done(struct task_struct *p)
3833 {
3834 struct rq *rq = task_rq(p);
3835
3836 lockdep_assert_rq_held(rq);
3837
3838 /*
3839 * In do_task_dead(), a dying task sets %TASK_DEAD with preemption
3840 * disabled and __schedule(). If @p has %TASK_DEAD set and off CPU, @p
3841 * won't ever run again.
3842 */
3843 return unlikely(READ_ONCE(p->__state) == TASK_DEAD) &&
3844 !task_on_cpu(rq, p);
3845 }
3846
3847 void sched_ext_dead(struct task_struct *p)
3848 {
3849 unsigned long flags;
3850
3851 /*
3852 * By the time control reaches here, @p has %TASK_DEAD set, switched out
3853 * for the last time and then dropped the rq lock - task_dead_and_done()
3854 * should be returning %true nullifying the straggling sched_class ops.
3855 * Remove from scx_tasks and exit @p.
3856 */
3857 raw_spin_lock_irqsave(&scx_tasks_lock, flags);
3858 list_del_init(&p->scx.tasks_node);
3859 raw_spin_unlock_irqrestore(&scx_tasks_lock, flags);
3860
3861 /*
3862 * @p is off scx_tasks and wholly ours. scx_root_enable()'s READY ->
3863 * ENABLED transitions can't race us. Disable ops for @p.
3864 *
3865 * %SCX_TASK_OFF_TASKS synchronizes against cgroup task iteration - see
3866 * scx_task_iter_next_locked(). NONE tasks need no marking: cgroup
3867 * iteration is only used from sub-sched paths, which require root
3868 * enabled. Root enable transitions every live task to at least READY.
3869 */
3870 if (scx_get_task_state(p) != SCX_TASK_NONE) {
3871 struct rq_flags rf;
3872 struct rq *rq;
3873
3874 rq = task_rq_lock(p, &rf);
3875 scx_disable_and_exit_task(scx_task_sched(p), p);
3876 p->scx.flags |= SCX_TASK_OFF_TASKS;
3877 task_rq_unlock(rq, p, &rf);
3878 }
3879 }
3880
3881 static void reweight_task_scx(struct rq *rq, struct task_struct *p,
3882 const struct load_weight *lw)
3883 {
3884 struct scx_sched *sch = scx_task_sched(p);
3885
3886 lockdep_assert_rq_held(task_rq(p));
3887
3888 if (task_dead_and_done(p))
3889 return;
3890
3891 p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
3892 if (SCX_HAS_OP(sch, set_weight))
3893 SCX_CALL_OP_TASK(sch, set_weight, rq, p, p->scx.weight);
3894 }
3895
3896 static void prio_changed_scx(struct rq *rq, struct task_struct *p, u64 oldprio)
3897 {
3898 }
3899
3900 static void switching_to_scx(struct rq *rq, struct task_struct *p)
3901 {
3902 struct scx_sched *sch = scx_task_sched(p);
3903
3904 if (task_dead_and_done(p))
3905 return;
3906
3907 scx_enable_task(sch, p);
3908
3909 /*
3910 * set_cpus_allowed_scx() is not called while @p is associated with a
3911 * different scheduler class. Keep the BPF scheduler up-to-date.
3912 */
3913 if (SCX_HAS_OP(sch, set_cpumask))
3914 SCX_CALL_OP_TASK(sch, set_cpumask, rq, p, (struct cpumask *)p->cpus_ptr);
3915 }
3916
3917 static void switched_from_scx(struct rq *rq, struct task_struct *p)
3918 {
3919 if (task_dead_and_done(p))
3920 return;
3921
3922 scx_disable_task(scx_task_sched(p), p);
3923 }
3924
3925 static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
3926
3927 int scx_check_setscheduler(struct task_struct *p, int policy)
3928 {
3929 lockdep_assert_rq_held(task_rq(p));
3930
3931 /* if disallow, reject transitioning into SCX */
3932 if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
3933 p->policy != policy && policy == SCHED_EXT)
3934 return -EACCES;
3935
3936 return 0;
3937 }
3938
3939 static void process_ddsp_deferred_locals(struct rq *rq)
3940 {
3941 struct task_struct *p;
3942
3943 lockdep_assert_rq_held(rq);
3944
3945 /*
3946 * Now that @rq can be unlocked, execute the deferred enqueueing of
3947 * tasks directly dispatched to the local DSQs of other CPUs. See
3948 * direct_dispatch(). Keep popping from the head instead of using
3949 * list_for_each_entry_safe() as dispatch_local_dsq() may unlock @rq
3950 * temporarily.
3951 */
3952 while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
3953 struct task_struct, scx.dsq_list.node))) {
3954 struct scx_sched *sch = scx_task_sched(p);
3955 struct scx_dispatch_q *dsq;
3956 u64 dsq_id = p->scx.ddsp_dsq_id;
3957 u64 enq_flags = p->scx.ddsp_enq_flags;
3958
3959 list_del_init(&p->scx.dsq_list.node);
3960 clear_direct_dispatch(p);
3961
3962 dsq = find_dsq_for_dispatch(sch, rq, dsq_id, task_cpu(p));
3963 if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
3964 dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags);
3965 }
3966 }
3967
3968 /*
3969 * Determine whether @p should be reenqueued from a local DSQ.
3970 *
3971 * @reenq_flags is mutable and accumulates state across the DSQ walk:
3972 *
3973 * - %SCX_REENQ_TSR_NOT_FIRST: Set after the first task is visited. "First"
3974 * tracks position in the DSQ list, not among IMMED tasks. A non-IMMED task at
3975 * the head consumes the first slot.
3976 *
3977 * - %SCX_REENQ_TSR_RQ_OPEN: Set by reenq_local() before the walk if
3978 * rq_is_open() is true.
3979 *
3980 * An IMMED task is kept (returns %false) only if it's the first task in the DSQ
3981 * AND the current task is done — i.e. it will execute immediately. All other
3982 * IMMED tasks are reenqueued. This means if a non-IMMED task sits at the head,
3983 * every IMMED task behind it gets reenqueued.
3984 *
3985 * Reenqueued tasks go through ops.enqueue() with %SCX_ENQ_REENQ |
3986 * %SCX_TASK_REENQ_IMMED. If the BPF scheduler dispatches back to the same local
3987 * DSQ with %SCX_ENQ_IMMED while the CPU is still unavailable, this triggers
3988 * another reenq cycle. Repetitions are bounded by %SCX_REENQ_LOCAL_MAX_REPEAT
3989 * in process_deferred_reenq_locals().
3990 */
3991 static bool local_task_should_reenq(struct task_struct *p, u64 *reenq_flags, u32 *reason)
3992 {
3993 bool first;
3994
3995 first = !(*reenq_flags & SCX_REENQ_TSR_NOT_FIRST);
3996 *reenq_flags |= SCX_REENQ_TSR_NOT_FIRST;
3997
3998 *reason = SCX_TASK_REENQ_KFUNC;
3999
4000 if ((p->scx.flags & SCX_TASK_IMMED) &&
4001 (!first || !(*reenq_flags & SCX_REENQ_TSR_RQ_OPEN))) {
4002 __scx_add_event(scx_task_sched(p), SCX_EV_REENQ_IMMED, 1);
4003 *reason = SCX_TASK_REENQ_IMMED;
4004 return true;
4005 }
4006
4007 return *reenq_flags & SCX_REENQ_ANY;
4008 }
4009
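/*
 * Re-enqueue eligible tasks sitting on @rq's local DSQ through ops.enqueue()
 * with %SCX_ENQ_REENQ. Eligibility is decided by local_task_should_reenq()
 * above. Returns the number of tasks re-enqueued.
 */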
4010 static u32 reenq_local(struct scx_sched *sch, struct rq *rq, u64 reenq_flags)
4011 {
4012 LIST_HEAD(tasks);
4013 u32 nr_enqueued = 0;
4014 struct task_struct *p, *n;
4015
4016 lockdep_assert_rq_held(rq);
4017
4018 if (WARN_ON_ONCE(reenq_flags & __SCX_REENQ_TSR_MASK))
4019 reenq_flags &= ~__SCX_REENQ_TSR_MASK;
4020 if (rq_is_open(rq, 0))
4021 reenq_flags |= SCX_REENQ_TSR_RQ_OPEN;
4022
4023 /*
4024 * The BPF scheduler may choose to dispatch tasks back to
4025 * @rq->scx.local_dsq. Move all candidate tasks off to a private list
4026 * first to avoid processing the same tasks repeatedly.
4027 */
4028 list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
4029 scx.dsq_list.node) {
4030 struct scx_sched *task_sch = scx_task_sched(p);
4031 u32 reason;
4032
4033 /*
4034 * If @p is being migrated, @p's current CPU may not agree with
4035 * its allowed CPUs and the migration_cpu_stop is about to
4036 * deactivate and re-activate @p anyway. Skip re-enqueueing.
4037 *
4038 * While racing sched property changes may also dequeue and
4039 * re-enqueue a migrating task while its current CPU and allowed
4040 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to
4041 * the current local DSQ for running tasks and thus are not
4042 * visible to the BPF scheduler.
4043 */
4044 if (p->migration_pending)
4045 continue;
4046
4047 if (!scx_is_descendant(task_sch, sch))
4048 continue;
4049
4050 if (!local_task_should_reenq(p, &reenq_flags, &reason))
4051 continue;
4052
4053 dispatch_dequeue(rq, p);
4054
4055 if (WARN_ON_ONCE(p->scx.flags & SCX_TASK_REENQ_REASON_MASK))
4056 p->scx.flags &= ~SCX_TASK_REENQ_REASON_MASK;
4057 p->scx.flags |= reason;
4058
4059 list_add_tail(&p->scx.dsq_list.node, &tasks);
4060 }
4061
4062 list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
4063 list_del_init(&p->scx.dsq_list.node);
4064
4065 do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
4066
4067 p->scx.flags &= ~SCX_TASK_REENQ_REASON_MASK;
4068 nr_enqueued++;
4069 }
4070
4071 return nr_enqueued;
4072 }
4073
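/*
 * Drain @rq's deferred local-DSQ re-enqueue requests. A request that keeps
 * repeating within the same invocation (more than SCX_REENQ_LOCAL_MAX_REPEAT
 * times) indicates the BPF scheduler is bouncing tasks back and forth and
 * triggers an ops error.
 */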
4074 static void process_deferred_reenq_locals(struct rq *rq)
4075 {
4076 u64 seq = ++rq->scx.deferred_reenq_locals_seq;
4077
4078 lockdep_assert_rq_held(rq);
4079
4080 while (true) {
4081 struct scx_sched *sch;
4082 u64 reenq_flags;
4083 bool skip = false;
4084
4085 scoped_guard (raw_spinlock, &rq->scx.deferred_reenq_lock) {
4086 struct scx_deferred_reenq_local *drl =
4087 list_first_entry_or_null(&rq->scx.deferred_reenq_locals,
4088 struct scx_deferred_reenq_local,
4089 node);
4090 struct scx_sched_pcpu *sch_pcpu;
4091
4092 if (!drl)
4093 return;
4094
4095 sch_pcpu = container_of(drl, struct scx_sched_pcpu,
4096 deferred_reenq_local);
4097 sch = sch_pcpu->sch;
4098
4099 reenq_flags = drl->flags;
4100 WRITE_ONCE(drl->flags, 0);
4101 list_del_init(&drl->node);
4102
4103 if (likely(drl->seq != seq)) {
4104 drl->seq = seq;
4105 drl->cnt = 0;
4106 } else {
4107 if (unlikely(++drl->cnt > SCX_REENQ_LOCAL_MAX_REPEAT)) {
4108 scx_error(sch, "SCX_ENQ_REENQ on SCX_DSQ_LOCAL repeated %u times",
4109 drl->cnt);
4110 skip = true;
4111 }
4112
4113 __scx_add_event(sch, SCX_EV_REENQ_LOCAL_REPEAT, 1);
4114 }
4115 }
4116
4117 if (!skip) {
4118 /* see schedule_dsq_reenq() */
4119 smp_mb();
4120
4121 reenq_local(sch, rq, reenq_flags);
4122 }
4123 }
4124 }
4125
4126 static bool user_task_should_reenq(struct task_struct *p, u64 reenq_flags, u32 *reason)
4127 {
4128 *reason = SCX_TASK_REENQ_KFUNC;
4129 return reenq_flags & SCX_REENQ_ANY;
4130 }
4131
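/*
 * Re-enqueue matching tasks on user DSQ @dsq through ops.enqueue() with
 * %SCX_ENQ_REENQ. May temporarily drop @rq's lock to acquire other rq locks
 * and processes tasks in SCX_TASK_ITER_BATCH chunks. Returns with @rq locked.
 */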
4132 static void reenq_user(struct rq *rq, struct scx_dispatch_q *dsq, u64 reenq_flags)
4133 {
4134 struct rq *locked_rq = rq;
4135 struct scx_sched *sch = dsq->sched;
4136 struct scx_dsq_list_node cursor = INIT_DSQ_LIST_CURSOR(cursor, dsq, 0);
4137 struct task_struct *p;
4138 s32 nr_enqueued = 0;
4139
4140 lockdep_assert_rq_held(rq);
4141
4142 raw_spin_lock(&dsq->lock);
4143
4144 while (likely(!READ_ONCE(sch->bypass_depth))) {
4145 struct rq *task_rq;
4146 u32 reason;
4147
4148 p = nldsq_cursor_next_task(&cursor, dsq);
4149 if (!p)
4150 break;
4151
4152 if (!user_task_should_reenq(p, reenq_flags, &reason))
4153 continue;
4154
4155 task_rq = task_rq(p);
4156
4157 if (locked_rq != task_rq) {
4158 if (locked_rq)
4159 raw_spin_rq_unlock(locked_rq);
4160 if (unlikely(!raw_spin_rq_trylock(task_rq))) {
4161 raw_spin_unlock(&dsq->lock);
4162 raw_spin_rq_lock(task_rq);
4163 raw_spin_lock(&dsq->lock);
4164 }
4165 locked_rq = task_rq;
4166
4167 /* did we lose @p while switching locks? */
4168 if (nldsq_cursor_lost_task(&cursor, task_rq, dsq, p))
4169 continue;
4170 }
4171
4172 /* @p is on @dsq, its rq and @dsq are locked */
4173 dispatch_dequeue_locked(p, dsq);
4174 raw_spin_unlock(&dsq->lock);
4175
4176 if (WARN_ON_ONCE(p->scx.flags & SCX_TASK_REENQ_REASON_MASK))
4177 p->scx.flags &= ~SCX_TASK_REENQ_REASON_MASK;
4178 p->scx.flags |= reason;
4179
4180 do_enqueue_task(task_rq, p, SCX_ENQ_REENQ, -1);
4181
4182 p->scx.flags &= ~SCX_TASK_REENQ_REASON_MASK;
4183
4184 if (!(++nr_enqueued % SCX_TASK_ITER_BATCH)) {
4185 raw_spin_rq_unlock(locked_rq);
4186 locked_rq = NULL;
4187 cpu_relax();
4188 }
4189
4190 raw_spin_lock(&dsq->lock);
4191 }
4192
4193 list_del_init(&cursor.node);
4194 raw_spin_unlock(&dsq->lock);
4195
4196 if (locked_rq != rq) {
4197 if (locked_rq)
4198 raw_spin_rq_unlock(locked_rq);
4199 raw_spin_rq_lock(rq);
4200 }
4201 }
4202
4203 static void process_deferred_reenq_users(struct rq *rq)
4204 {
4205 lockdep_assert_rq_held(rq);
4206
4207 while (true) {
4208 struct scx_dispatch_q *dsq;
4209 u64 reenq_flags;
4210
4211 scoped_guard (raw_spinlock, &rq->scx.deferred_reenq_lock) {
4212 struct scx_deferred_reenq_user *dru =
4213 list_first_entry_or_null(&rq->scx.deferred_reenq_users,
4214 struct scx_deferred_reenq_user,
4215 node);
4216 struct scx_dsq_pcpu *dsq_pcpu;
4217
4218 if (!dru)
4219 return;
4220
4221 dsq_pcpu = container_of(dru, struct scx_dsq_pcpu,
4222 deferred_reenq_user);
4223 dsq = dsq_pcpu->dsq;
4224 reenq_flags = dru->flags;
4225 WRITE_ONCE(dru->flags, 0);
4226 list_del_init(&dru->node);
4227 }
4228
4229 /* see schedule_dsq_reenq() */
4230 smp_mb();
4231
4232 BUG_ON(dsq->id & SCX_DSQ_FLAG_BUILTIN);
4233 reenq_user(rq, dsq, reenq_flags);
4234 }
4235 }
4236
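/*
 * Execute work that had to be deferred while @rq was locked: enqueueing of
 * tasks direct-dispatched to other CPUs' local DSQs and any pending local and
 * user DSQ re-enqueue requests.
 */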
4237 static void run_deferred(struct rq *rq)
4238 {
4239 process_ddsp_deferred_locals(rq);
4240
4241 if (!list_empty(&rq->scx.deferred_reenq_locals))
4242 process_deferred_reenq_locals(rq);
4243
4244 if (!list_empty(&rq->scx.deferred_reenq_users))
4245 process_deferred_reenq_users(rq);
4246 }
4247
4248 #ifdef CONFIG_NO_HZ_FULL
4249 bool scx_can_stop_tick(struct rq *rq)
4250 {
4251 struct task_struct *p = rq->curr;
4252 struct scx_sched *sch = scx_task_sched(p);
4253
4254 if (p->sched_class != &ext_sched_class)
4255 return true;
4256
4257 if (scx_bypassing(sch, cpu_of(rq)))
4258 return false;
4259
4260 /*
4261 * @rq can dispatch from different DSQs, so we can't tell whether it
4262 * needs the tick or not by looking at nr_running. Allow stopping ticks
4263 * iff the BPF scheduler indicated so. See set_next_task_scx().
4264 */
4265 return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
4266 }
4267 #endif
4268
4269 #ifdef CONFIG_EXT_GROUP_SCHED
4270
4271 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_ops_rwsem);
4272 static bool scx_cgroup_enabled;
4273
4274 void scx_tg_init(struct task_group *tg)
4275 {
4276 tg->scx.weight = CGROUP_WEIGHT_DFL;
4277 tg->scx.bw_period_us = default_bw_period_us();
4278 tg->scx.bw_quota_us = RUNTIME_INF;
4279 tg->scx.idle = false;
4280 }
4281
4282 int scx_tg_online(struct task_group *tg)
4283 {
4284 struct scx_sched *sch = scx_root;
4285 int ret = 0;
4286
4287 WARN_ON_ONCE(tg->scx.flags & (SCX_TG_ONLINE | SCX_TG_INITED));
4288
4289 if (scx_cgroup_enabled) {
4290 if (SCX_HAS_OP(sch, cgroup_init)) {
4291 struct scx_cgroup_init_args args =
4292 { .weight = tg->scx.weight,
4293 .bw_period_us = tg->scx.bw_period_us,
4294 .bw_quota_us = tg->scx.bw_quota_us,
4295 .bw_burst_us = tg->scx.bw_burst_us };
4296
4297 ret = SCX_CALL_OP_RET(sch, cgroup_init,
4298 NULL, tg->css.cgroup, &args);
4299 if (ret)
4300 ret = ops_sanitize_err(sch, "cgroup_init", ret);
4301 }
4302 if (ret == 0)
4303 tg->scx.flags |= SCX_TG_ONLINE | SCX_TG_INITED;
4304 } else {
4305 tg->scx.flags |= SCX_TG_ONLINE;
4306 }
4307
4308 return ret;
4309 }
4310
4311 void scx_tg_offline(struct task_group *tg)
4312 {
4313 struct scx_sched *sch = scx_root;
4314
4315 WARN_ON_ONCE(!(tg->scx.flags & SCX_TG_ONLINE));
4316
4317 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_exit) &&
4318 (tg->scx.flags & SCX_TG_INITED))
4319 SCX_CALL_OP(sch, cgroup_exit, NULL, tg->css.cgroup);
4320 tg->scx.flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
4321 }
4322
4323 int scx_cgroup_can_attach(struct cgroup_taskset *tset)
4324 {
4325 struct scx_sched *sch = scx_root;
4326 struct cgroup_subsys_state *css;
4327 struct task_struct *p;
4328 int ret;
4329
4330 if (!scx_cgroup_enabled)
4331 return 0;
4332
4333 cgroup_taskset_for_each(p, css, tset) {
4334 struct cgroup *from = tg_cgrp(task_group(p));
4335 struct cgroup *to = tg_cgrp(css_tg(css));
4336
4337 WARN_ON_ONCE(p->scx.cgrp_moving_from);
4338
4339 /*
4340 * sched_move_task() omits identity migrations. Let's match the
4341 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move()
4342 * always match one-to-one.
4343 */
4344 if (from == to)
4345 continue;
4346
4347 if (SCX_HAS_OP(sch, cgroup_prep_move)) {
4348 ret = SCX_CALL_OP_RET(sch, cgroup_prep_move, NULL,
4349 p, from, css->cgroup);
4350 if (ret)
4351 goto err;
4352 }
4353
4354 p->scx.cgrp_moving_from = from;
4355 }
4356
4357 return 0;
4358
4359 err:
4360 cgroup_taskset_for_each(p, css, tset) {
4361 if (SCX_HAS_OP(sch, cgroup_cancel_move) &&
4362 p->scx.cgrp_moving_from)
4363 SCX_CALL_OP(sch, cgroup_cancel_move, NULL,
4364 p, p->scx.cgrp_moving_from, css->cgroup);
4365 p->scx.cgrp_moving_from = NULL;
4366 }
4367
4368 return ops_sanitize_err(sch, "cgroup_prep_move", ret);
4369 }
4370
4371 void scx_cgroup_move_task(struct task_struct *p)
4372 {
4373 struct scx_sched *sch = scx_root;
4374
4375 if (!scx_cgroup_enabled)
4376 return;
4377
4378 /*
4379 * @p must have ops.cgroup_prep_move() called on it and thus
4380 * cgrp_moving_from set.
4381 */
4382 if (SCX_HAS_OP(sch, cgroup_move) &&
4383 !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
4384 SCX_CALL_OP_TASK(sch, cgroup_move, task_rq(p),
4385 p, p->scx.cgrp_moving_from,
4386 tg_cgrp(task_group(p)));
4387 p->scx.cgrp_moving_from = NULL;
4388 }
4389
4390 void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
4391 {
4392 struct scx_sched *sch = scx_root;
4393 struct cgroup_subsys_state *css;
4394 struct task_struct *p;
4395
4396 if (!scx_cgroup_enabled)
4397 return;
4398
4399 cgroup_taskset_for_each(p, css, tset) {
4400 if (SCX_HAS_OP(sch, cgroup_cancel_move) &&
4401 p->scx.cgrp_moving_from)
4402 SCX_CALL_OP(sch, cgroup_cancel_move, NULL,
4403 p, p->scx.cgrp_moving_from, css->cgroup);
4404 p->scx.cgrp_moving_from = NULL;
4405 }
4406 }
4407
4408 void scx_group_set_weight(struct task_group *tg, unsigned long weight)
4409 {
4410 struct scx_sched *sch;
4411
4412 percpu_down_read(&scx_cgroup_ops_rwsem);
4413 sch = scx_root;
4414
4415 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_weight) &&
4416 tg->scx.weight != weight)
4417 SCX_CALL_OP(sch, cgroup_set_weight, NULL, tg_cgrp(tg), weight);
4418
4419 tg->scx.weight = weight;
4420
4421 percpu_up_read(&scx_cgroup_ops_rwsem);
4422 }
4423
4424 void scx_group_set_idle(struct task_group *tg, bool idle)
4425 {
4426 struct scx_sched *sch;
4427
4428 percpu_down_read(&scx_cgroup_ops_rwsem);
4429 sch = scx_root;
4430
4431 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_idle))
4432 SCX_CALL_OP(sch, cgroup_set_idle, NULL, tg_cgrp(tg), idle);
4433
4434 /* Update the task group's idle state */
4435 tg->scx.idle = idle;
4436
4437 percpu_up_read(&scx_cgroup_ops_rwsem);
4438 }
4439
4440 void scx_group_set_bandwidth(struct task_group *tg,
4441 u64 period_us, u64 quota_us, u64 burst_us)
4442 {
4443 struct scx_sched *sch;
4444
4445 percpu_down_read(&scx_cgroup_ops_rwsem);
4446 sch = scx_root;
4447
4448 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_bandwidth) &&
4449 (tg->scx.bw_period_us != period_us ||
4450 tg->scx.bw_quota_us != quota_us ||
4451 tg->scx.bw_burst_us != burst_us))
4452 SCX_CALL_OP(sch, cgroup_set_bandwidth, NULL,
4453 tg_cgrp(tg), period_us, quota_us, burst_us);
4454
4455 tg->scx.bw_period_us = period_us;
4456 tg->scx.bw_quota_us = quota_us;
4457 tg->scx.bw_burst_us = burst_us;
4458
4459 percpu_up_read(&scx_cgroup_ops_rwsem);
4460 }
4461 #endif /* CONFIG_EXT_GROUP_SCHED */
4462
4463 #if defined(CONFIG_EXT_GROUP_SCHED) || defined(CONFIG_EXT_SUB_SCHED)
4464 static struct cgroup *root_cgroup(void)
4465 {
4466 return &cgrp_dfl_root.cgrp;
4467 }
4468
4469 static void scx_cgroup_lock(void)
4470 {
4471 #ifdef CONFIG_EXT_GROUP_SCHED
4472 percpu_down_write(&scx_cgroup_ops_rwsem);
4473 #endif
4474 cgroup_lock();
4475 }
4476
4477 static void scx_cgroup_unlock(void)
4478 {
4479 cgroup_unlock();
4480 #ifdef CONFIG_EXT_GROUP_SCHED
4481 percpu_up_write(&scx_cgroup_ops_rwsem);
4482 #endif
4483 }
4484 #else /* CONFIG_EXT_GROUP_SCHED || CONFIG_EXT_SUB_SCHED */
4485 static struct cgroup *root_cgroup(void) { return NULL; }
4486 static void scx_cgroup_lock(void) {}
4487 static void scx_cgroup_unlock(void) {}
4488 #endif /* CONFIG_EXT_GROUP_SCHED || CONFIG_EXT_SUB_SCHED */
4489
4490 #ifdef CONFIG_EXT_SUB_SCHED
4491 static struct cgroup *sch_cgroup(struct scx_sched *sch)
4492 {
4493 return sch->cgrp;
4494 }
4495
4496 /* for each descendant of @cgrp including self, set ->scx_sched to @sch */
4497 static void set_cgroup_sched(struct cgroup *cgrp, struct scx_sched *sch)
4498 {
4499 struct cgroup *pos;
4500 struct cgroup_subsys_state *css;
4501
4502 cgroup_for_each_live_descendant_pre(pos, css, cgrp)
4503 rcu_assign_pointer(pos->scx_sched, sch);
4504 }
4505 #else /* CONFIG_EXT_SUB_SCHED */
4506 static struct cgroup *sch_cgroup(struct scx_sched *sch) { return NULL; }
4507 static void set_cgroup_sched(struct cgroup *cgrp, struct scx_sched *sch) {}
4508 #endif /* CONFIG_EXT_SUB_SCHED */
4509
4510 /*
4511 * Omitted operations:
4512 *
4513 * - migrate_task_rq: Unnecessary as task to cpu mapping is transient.
4514 *
4515 * - task_fork/dead: We need fork/dead notifications for all tasks regardless of
4516 * their current sched_class. Call them directly from sched core instead.
4517 */
4518 DEFINE_SCHED_CLASS(ext) = {
4519 .enqueue_task = enqueue_task_scx,
4520 .dequeue_task = dequeue_task_scx,
4521 .yield_task = yield_task_scx,
4522 .yield_to_task = yield_to_task_scx,
4523
4524 .wakeup_preempt = wakeup_preempt_scx,
4525
4526 .pick_task = pick_task_scx,
4527
4528 .put_prev_task = put_prev_task_scx,
4529 .set_next_task = set_next_task_scx,
4530
4531 .select_task_rq = select_task_rq_scx,
4532 .task_woken = task_woken_scx,
4533 .set_cpus_allowed = set_cpus_allowed_scx,
4534
4535 .rq_online = rq_online_scx,
4536 .rq_offline = rq_offline_scx,
4537
4538 .task_tick = task_tick_scx,
4539
4540 .switching_to = switching_to_scx,
4541 .switched_from = switched_from_scx,
4542 .switched_to = switched_to_scx,
4543 .reweight_task = reweight_task_scx,
4544 .prio_changed = prio_changed_scx,
4545
4546 .update_curr = update_curr_scx,
4547
4548 #ifdef CONFIG_UCLAMP_TASK
4549 .uclamp_enabled = 1,
4550 #endif
4551 };
4552
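/*
 * Initialize @dsq with @dsq_id on behalf of @sch, including the per-CPU
 * deferred re-enqueue anchors. Returns 0 on success, -ENOMEM on allocation
 * failure.
 */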
4553 static s32 init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id,
4554 struct scx_sched *sch)
4555 {
4556 s32 cpu;
4557
4558 memset(dsq, 0, sizeof(*dsq));
4559
4560 raw_spin_lock_init(&dsq->lock);
4561 INIT_LIST_HEAD(&dsq->list);
4562 dsq->id = dsq_id;
4563 dsq->sched = sch;
4564
4565 dsq->pcpu = alloc_percpu(struct scx_dsq_pcpu);
4566 if (!dsq->pcpu)
4567 return -ENOMEM;
4568
4569 for_each_possible_cpu(cpu) {
4570 struct scx_dsq_pcpu *pcpu = per_cpu_ptr(dsq->pcpu, cpu);
4571
4572 pcpu->dsq = dsq;
4573 INIT_LIST_HEAD(&pcpu->deferred_reenq_user.node);
4574 }
4575
4576 return 0;
4577 }
4578
4579 static void exit_dsq(struct scx_dispatch_q *dsq)
4580 {
4581 s32 cpu;
4582
4583 for_each_possible_cpu(cpu) {
4584 struct scx_dsq_pcpu *pcpu = per_cpu_ptr(dsq->pcpu, cpu);
4585 struct scx_deferred_reenq_user *dru = &pcpu->deferred_reenq_user;
4586 struct rq *rq = cpu_rq(cpu);
4587
4588 /*
4589 		 * There must have been an RCU grace period since the last
4590 * insertion and @dsq should be off the deferred list by now.
4591 */
4592 if (WARN_ON_ONCE(!list_empty(&dru->node))) {
4593 guard(raw_spinlock_irqsave)(&rq->scx.deferred_reenq_lock);
4594 list_del_init(&dru->node);
4595 }
4596 }
4597
4598 free_percpu(dsq->pcpu);
4599 }
4600
4601 static void free_dsq_rcufn(struct rcu_head *rcu)
4602 {
4603 struct scx_dispatch_q *dsq = container_of(rcu, struct scx_dispatch_q, rcu);
4604
4605 exit_dsq(dsq);
4606 kfree(dsq);
4607 }
4608
4609 static void free_dsq_irq_workfn(struct irq_work *irq_work)
4610 {
4611 struct llist_node *to_free = llist_del_all(&dsqs_to_free);
4612 struct scx_dispatch_q *dsq, *tmp_dsq;
4613
4614 llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
4615 call_rcu(&dsq->rcu, free_dsq_rcufn);
4616 }
4617
4618 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);
4619
4620 static void destroy_dsq(struct scx_sched *sch, u64 dsq_id)
4621 {
4622 struct scx_dispatch_q *dsq;
4623 unsigned long flags;
4624
4625 rcu_read_lock();
4626
4627 dsq = find_user_dsq(sch, dsq_id);
4628 if (!dsq)
4629 goto out_unlock_rcu;
4630
4631 raw_spin_lock_irqsave(&dsq->lock, flags);
4632
4633 if (dsq->nr) {
4634 scx_error(sch, "attempting to destroy in-use dsq 0x%016llx (nr=%u)",
4635 dsq->id, dsq->nr);
4636 goto out_unlock_dsq;
4637 }
4638
4639 if (rhashtable_remove_fast(&sch->dsq_hash, &dsq->hash_node,
4640 dsq_hash_params))
4641 goto out_unlock_dsq;
4642
4643 /*
4644 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from
4645 * queueing more tasks. As this function can be called from anywhere,
4646 * freeing is bounced through an irq work to avoid nesting RCU
4647 * operations inside scheduler locks.
4648 */
4649 dsq->id = SCX_DSQ_INVALID;
4650 if (llist_add(&dsq->free_node, &dsqs_to_free))
4651 irq_work_queue(&free_dsq_irq_work);
4652
4653 out_unlock_dsq:
4654 raw_spin_unlock_irqrestore(&dsq->lock, flags);
4655 out_unlock_rcu:
4656 rcu_read_unlock();
4657 }
4658
4659 #ifdef CONFIG_EXT_GROUP_SCHED
4660 static void scx_cgroup_exit(struct scx_sched *sch)
4661 {
4662 struct cgroup_subsys_state *css;
4663
4664 scx_cgroup_enabled = false;
4665
4666 /*
4667 * scx_tg_on/offline() are excluded through cgroup_lock(). If we walk
4668 * cgroups and exit all the inited ones, all online cgroups are exited.
4669 */
4670 css_for_each_descendant_post(css, &root_task_group.css) {
4671 struct task_group *tg = css_tg(css);
4672
4673 if (!(tg->scx.flags & SCX_TG_INITED))
4674 continue;
4675 tg->scx.flags &= ~SCX_TG_INITED;
4676
4677 if (!sch->ops.cgroup_exit)
4678 continue;
4679
4680 SCX_CALL_OP(sch, cgroup_exit, NULL, css->cgroup);
4681 }
4682 }
4683
4684 static int scx_cgroup_init(struct scx_sched *sch)
4685 {
4686 struct cgroup_subsys_state *css;
4687 int ret;
4688
4689 /*
4690 * scx_tg_on/offline() are excluded through cgroup_lock(). If we walk
4691 * cgroups and init, all online cgroups are initialized.
4692 */
4693 css_for_each_descendant_pre(css, &root_task_group.css) {
4694 struct task_group *tg = css_tg(css);
4695 struct scx_cgroup_init_args args = {
4696 .weight = tg->scx.weight,
4697 .bw_period_us = tg->scx.bw_period_us,
4698 .bw_quota_us = tg->scx.bw_quota_us,
4699 .bw_burst_us = tg->scx.bw_burst_us,
4700 };
4701
4702 if ((tg->scx.flags &
4703 (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
4704 continue;
4705
4706 if (!sch->ops.cgroup_init) {
4707 tg->scx.flags |= SCX_TG_INITED;
4708 continue;
4709 }
4710
4711 ret = SCX_CALL_OP_RET(sch, cgroup_init, NULL,
4712 css->cgroup, &args);
4713 if (ret) {
4714 scx_error(sch, "ops.cgroup_init() failed (%d)", ret);
4715 return ret;
4716 }
4717 tg->scx.flags |= SCX_TG_INITED;
4718 }
4719
4720 WARN_ON_ONCE(scx_cgroup_enabled);
4721 scx_cgroup_enabled = true;
4722
4723 return 0;
4724 }
4725
4726 #else
4727 static void scx_cgroup_exit(struct scx_sched *sch) {}
4728 static int scx_cgroup_init(struct scx_sched *sch) { return 0; }
4729 #endif
4730
4731
4732 /********************************************************************************
4733 * Sysfs interface and ops enable/disable.
4734 */
4735
4736 #define SCX_ATTR(_name) \
4737 static struct kobj_attribute scx_attr_##_name = { \
4738 .attr = { .name = __stringify(_name), .mode = 0444 }, \
4739 .show = scx_attr_##_name##_show, \
4740 }
4741
4742 static ssize_t scx_attr_state_show(struct kobject *kobj,
4743 struct kobj_attribute *ka, char *buf)
4744 {
4745 return sysfs_emit(buf, "%s\n", scx_enable_state_str[scx_enable_state()]);
4746 }
4747 SCX_ATTR(state);
4748
4749 static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
4750 struct kobj_attribute *ka, char *buf)
4751 {
4752 return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all));
4753 }
4754 SCX_ATTR(switch_all);
4755
4756 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
4757 struct kobj_attribute *ka, char *buf)
4758 {
4759 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
4760 }
4761 SCX_ATTR(nr_rejected);
4762
4763 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
4764 struct kobj_attribute *ka, char *buf)
4765 {
4766 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
4767 }
4768 SCX_ATTR(hotplug_seq);
4769
4770 static ssize_t scx_attr_enable_seq_show(struct kobject *kobj,
4771 struct kobj_attribute *ka, char *buf)
4772 {
4773 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq));
4774 }
4775 SCX_ATTR(enable_seq);
4776
4777 static struct attribute *scx_global_attrs[] = {
4778 &scx_attr_state.attr,
4779 &scx_attr_switch_all.attr,
4780 &scx_attr_nr_rejected.attr,
4781 &scx_attr_hotplug_seq.attr,
4782 &scx_attr_enable_seq.attr,
4783 NULL,
4784 };
4785
4786 static const struct attribute_group scx_global_attr_group = {
4787 .attrs = scx_global_attrs,
4788 };
4789
4790 static void free_pnode(struct scx_sched_pnode *pnode);
4791 static void free_exit_info(struct scx_exit_info *ei);
4792
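/*
 * Final teardown of an scx_sched, queued from scx_kobj_release() as RCU work
 * so that freeing happens only after a grace period: sync the disable irq
 * work, destroy the helper kthread and bypass LB timer, free per-CPU and
 * per-node state, destroy the remaining DSQs and release the exit info.
 */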
4793 static void scx_sched_free_rcu_work(struct work_struct *work)
4794 {
4795 struct rcu_work *rcu_work = to_rcu_work(work);
4796 struct scx_sched *sch = container_of(rcu_work, struct scx_sched, rcu_work);
4797 struct rhashtable_iter rht_iter;
4798 struct scx_dispatch_q *dsq;
4799 int cpu, node;
4800
4801 irq_work_sync(&sch->disable_irq_work);
4802 kthread_destroy_worker(sch->helper);
4803 timer_shutdown_sync(&sch->bypass_lb_timer);
4804 free_cpumask_var(sch->bypass_lb_donee_cpumask);
4805 free_cpumask_var(sch->bypass_lb_resched_cpumask);
4806
4807 #ifdef CONFIG_EXT_SUB_SCHED
4808 kfree(sch->cgrp_path);
4809 if (sch_cgroup(sch))
4810 cgroup_put(sch_cgroup(sch));
4811 #endif /* CONFIG_EXT_SUB_SCHED */
4812
4813 for_each_possible_cpu(cpu) {
4814 struct scx_sched_pcpu *pcpu = per_cpu_ptr(sch->pcpu, cpu);
4815
4816 /*
4817 		 * @sch would have entered bypass mode before the RCU grace
4818 		 * period. As that blocks new deferrals, all
4819 		 * deferred_reenq_local nodes must be off-list by now.
4820 */
4821 WARN_ON_ONCE(!list_empty(&pcpu->deferred_reenq_local.node));
4822
4823 exit_dsq(bypass_dsq(sch, cpu));
4824 }
4825
4826 free_percpu(sch->pcpu);
4827
4828 for_each_node_state(node, N_POSSIBLE)
4829 free_pnode(sch->pnode[node]);
4830 kfree(sch->pnode);
4831
4832 rhashtable_walk_enter(&sch->dsq_hash, &rht_iter);
4833 do {
4834 rhashtable_walk_start(&rht_iter);
4835
4836 while (!IS_ERR_OR_NULL((dsq = rhashtable_walk_next(&rht_iter))))
4837 destroy_dsq(sch, dsq->id);
4838
4839 rhashtable_walk_stop(&rht_iter);
4840 } while (dsq == ERR_PTR(-EAGAIN));
4841 rhashtable_walk_exit(&rht_iter);
4842
4843 rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL);
4844 free_exit_info(sch->exit_info);
4845 kfree(sch);
4846 }
4847
4848 static void scx_kobj_release(struct kobject *kobj)
4849 {
4850 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
4851
4852 INIT_RCU_WORK(&sch->rcu_work, scx_sched_free_rcu_work);
4853 queue_rcu_work(system_dfl_wq, &sch->rcu_work);
4854 }
4855
4856 static ssize_t scx_attr_ops_show(struct kobject *kobj,
4857 struct kobj_attribute *ka, char *buf)
4858 {
4859 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
4860
4861 return sysfs_emit(buf, "%s\n", sch->ops.name);
4862 }
4863 SCX_ATTR(ops);
4864
4865 #define scx_attr_event_show(buf, at, events, kind) ({ \
4866 sysfs_emit_at(buf, at, "%s %llu\n", #kind, (events)->kind); \
4867 })
4868
4869 static ssize_t scx_attr_events_show(struct kobject *kobj,
4870 struct kobj_attribute *ka, char *buf)
4871 {
4872 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
4873 struct scx_event_stats events;
4874 int at = 0;
4875
4876 scx_read_events(sch, &events);
4877 at += scx_attr_event_show(buf, at, &events, SCX_EV_SELECT_CPU_FALLBACK);
4878 at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
4879 at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_KEEP_LAST);
4880 at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_EXITING);
4881 at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
4882 at += scx_attr_event_show(buf, at, &events, SCX_EV_REENQ_IMMED);
4883 at += scx_attr_event_show(buf, at, &events, SCX_EV_REENQ_LOCAL_REPEAT);
4884 at += scx_attr_event_show(buf, at, &events, SCX_EV_REFILL_SLICE_DFL);
4885 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DURATION);
4886 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DISPATCH);
4887 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_ACTIVATE);
4888 at += scx_attr_event_show(buf, at, &events, SCX_EV_INSERT_NOT_OWNED);
4889 at += scx_attr_event_show(buf, at, &events, SCX_EV_SUB_BYPASS_DISPATCH);
4890 return at;
4891 }
4892 SCX_ATTR(events);
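/*
 * Each line of the "events" file is emitted as "<name> <count>", for example
 * (counts illustrative):
 *
 *	SCX_EV_SELECT_CPU_FALLBACK 0
 *	SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE 0
 *	SCX_EV_BYPASS_ACTIVATE 2
 */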
4893
4894 static struct attribute *scx_sched_attrs[] = {
4895 &scx_attr_ops.attr,
4896 &scx_attr_events.attr,
4897 NULL,
4898 };
4899 ATTRIBUTE_GROUPS(scx_sched);
4900
4901 static const struct kobj_type scx_ktype = {
4902 .release = scx_kobj_release,
4903 .sysfs_ops = &kobj_sysfs_ops,
4904 .default_groups = scx_sched_groups,
4905 };
4906
4907 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
4908 {
4909 const struct scx_sched *sch;
4910
4911 /*
4912 * scx_uevent() can be reached by both scx_sched kobjects (scx_ktype)
4913 * and sub-scheduler kset kobjects (kset_ktype) through the parent
4914 * chain walk. Filter out the latter to avoid invalid casts.
4915 */
4916 if (kobj->ktype != &scx_ktype)
4917 return 0;
4918
4919 sch = container_of(kobj, struct scx_sched, kobj);
4920
4921 return add_uevent_var(env, "SCXOPS=%s", sch->ops.name);
4922 }
4923
4924 static const struct kset_uevent_ops scx_uevent_ops = {
4925 .uevent = scx_uevent,
4926 };
4927
4928 /*
4929 * Used by sched_fork() and __setscheduler_prio() to pick the matching
4930 * sched_class. dl/rt are already handled.
4931 */
4932 bool task_should_scx(int policy)
4933 {
4934 if (!scx_enabled() || unlikely(scx_enable_state() == SCX_DISABLING))
4935 return false;
4936 if (READ_ONCE(scx_switching_all))
4937 return true;
4938 return policy == SCHED_EXT;
4939 }
4940
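/*
 * Whether ttwu may queue @p for a remote wakeup. Allowed if sched_ext is
 * disabled, if @p's scheduler set %SCX_OPS_ALLOW_QUEUED_WAKEUP, or if @p isn't
 * on the ext sched_class; disallowed otherwise.
 */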
4941 bool scx_allow_ttwu_queue(const struct task_struct *p)
4942 {
4943 struct scx_sched *sch;
4944
4945 if (!scx_enabled())
4946 return true;
4947
4948 sch = scx_task_sched(p);
4949 if (unlikely(!sch))
4950 return true;
4951
4952 if (sch->ops.flags & SCX_OPS_ALLOW_QUEUED_WAKEUP)
4953 return true;
4954
4955 if (unlikely(p->sched_class != &ext_sched_class))
4956 return true;
4957
4958 return false;
4959 }
4960
4961 /**
4962 * handle_lockup - sched_ext common lockup handler
4963 * @fmt: format string
4964 *
4965 * Called on system stall or lockup condition and initiates abort of sched_ext
4966 * if enabled, which may resolve the reported lockup.
4967 *
4968 * Returns %true if sched_ext is enabled and abort was initiated, which may
4969 * resolve the lockup. %false if sched_ext is not enabled or abort was already
4970 * initiated by someone else.
4971 */
4972 static __printf(1, 2) bool handle_lockup(const char *fmt, ...)
4973 {
4974 struct scx_sched *sch;
4975 va_list args;
4976 bool ret;
4977
4978 guard(rcu)();
4979
4980 sch = rcu_dereference(scx_root);
4981 if (unlikely(!sch))
4982 return false;
4983
4984 switch (scx_enable_state()) {
4985 case SCX_ENABLING:
4986 case SCX_ENABLED:
4987 va_start(args, fmt);
4988 ret = scx_verror(sch, fmt, args);
4989 va_end(args);
4990 return ret;
4991 default:
4992 return false;
4993 }
4994 }
4995
4996 /**
4997 * scx_rcu_cpu_stall - sched_ext RCU CPU stall handler
4998 *
4999 * While there are various reasons why RCU CPU stalls can occur on a system
5000 * that may not be caused by the current BPF scheduler, try kicking out the
5001 * current scheduler in an attempt to recover the system to a good state before
5002 * issuing panics.
5003 *
5004 * Returns %true if sched_ext is enabled and abort was initiated, which may
5005 * resolve the reported RCU stall. %false if sched_ext is not enabled or someone
5006 * else already initiated abort.
5007 */
5008 bool scx_rcu_cpu_stall(void)
5009 {
5010 return handle_lockup("RCU CPU stall detected!");
5011 }
5012
5013 /**
5014 * scx_softlockup - sched_ext softlockup handler
5015 * @dur_s: number of seconds of CPU stuck due to soft lockup
5016 *
5017 * On some multi-socket setups (e.g. 2x Intel 8480c), the BPF scheduler can
5018 * live-lock the system by making many CPUs target the same DSQ to the point
5019 * where soft-lockup detection triggers. This function is called from
5020 * soft-lockup watchdog when the triggering point is close and tries to unjam
5021 * the system by aborting the BPF scheduler.
5022 */
5023 void scx_softlockup(u32 dur_s)
5024 {
5025 if (!handle_lockup("soft lockup - CPU %d stuck for %us", smp_processor_id(), dur_s))
5026 return;
5027
5028 printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU %d stuck for %us, disabling BPF scheduler\n",
5029 smp_processor_id(), dur_s);
5030 }
5031
5032 /*
5033 * scx_hardlockup() runs from NMI and eventually calls scx_claim_exit(),
5034 * which takes scx_sched_lock. scx_sched_lock isn't NMI-safe and grabbing
5035 * it from NMI context can lead to deadlocks. Defer via irq_work; the
5036 * disable path runs off irq_work anyway.
5037 */
5038 static atomic_t scx_hardlockup_cpu = ATOMIC_INIT(-1);
5039
5040 static void scx_hardlockup_irq_workfn(struct irq_work *work)
5041 {
5042 int cpu = atomic_xchg(&scx_hardlockup_cpu, -1);
5043
5044 if (cpu >= 0 && handle_lockup("hard lockup - CPU %d", cpu))
5045 printk_deferred(KERN_ERR "sched_ext: Hard lockup - CPU %d, disabling BPF scheduler\n",
5046 cpu);
5047 }
5048
5049 static DEFINE_IRQ_WORK(scx_hardlockup_irq_work, scx_hardlockup_irq_workfn);
5050
5051 /**
5052 * scx_hardlockup - sched_ext hardlockup handler
5053 *
5054 * A poorly behaving BPF scheduler can trigger hard lockup by e.g. putting
5055 * numerous affinitized tasks in a single queue and directing all CPUs at it.
5056 * Try kicking out the current scheduler in an attempt to recover the system to
5057 * a good state before taking more drastic actions.
5058 *
5059 * Queues an irq_work; the handle_lockup() call happens in IRQ context (see
5060 * scx_hardlockup_irq_workfn).
5061 *
5062 * Returns %true if sched_ext is enabled and the work was queued, %false
5063 * otherwise.
5064 */
5065 bool scx_hardlockup(int cpu)
5066 {
5067 if (!rcu_access_pointer(scx_root))
5068 return false;
5069
5070 atomic_cmpxchg(&scx_hardlockup_cpu, -1, cpu);
5071 irq_work_queue(&scx_hardlockup_irq_work);
5072 return true;
5073 }
5074
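/*
 * Offload tasks from @donor's bypass DSQ onto CPUs in @donee_mask until the
 * donor drops to @nr_donor_target or no donee can take more. Donees that reach
 * @nr_donee_target are cleared from @donee_mask and every CPU that received a
 * task is recorded in @resched_mask. Returns the number of tasks moved.
 */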
5075 static u32 bypass_lb_cpu(struct scx_sched *sch, s32 donor,
5076 struct cpumask *donee_mask, struct cpumask *resched_mask,
5077 u32 nr_donor_target, u32 nr_donee_target)
5078 {
5079 struct rq *donor_rq = cpu_rq(donor);
5080 struct scx_dispatch_q *donor_dsq = bypass_dsq(sch, donor);
5081 struct task_struct *p, *n;
5082 struct scx_dsq_list_node cursor = INIT_DSQ_LIST_CURSOR(cursor, donor_dsq, 0);
5083 s32 delta = READ_ONCE(donor_dsq->nr) - nr_donor_target;
5084 u32 nr_balanced = 0, min_delta_us;
5085
5086 /*
5087 * All we want to guarantee is reasonable forward progress. No reason to
5088 * fine tune. Assuming every task on @donor_dsq runs their full slice,
5089 * consider offloading iff the total queued duration is over the
5090 * threshold.
5091 */
5092 min_delta_us = READ_ONCE(scx_bypass_lb_intv_us) / SCX_BYPASS_LB_MIN_DELTA_DIV;
5093 if (delta < DIV_ROUND_UP(min_delta_us, READ_ONCE(scx_slice_bypass_us)))
5094 return 0;
5095
5096 raw_spin_rq_lock_irq(donor_rq);
5097 raw_spin_lock(&donor_dsq->lock);
5098 list_add(&cursor.node, &donor_dsq->list);
5099 resume:
5100 n = container_of(&cursor, struct task_struct, scx.dsq_list);
5101 n = nldsq_next_task(donor_dsq, n, false);
5102
5103 while ((p = n)) {
5104 struct scx_dispatch_q *donee_dsq;
5105 int donee;
5106
5107 n = nldsq_next_task(donor_dsq, n, false);
5108
5109 if (donor_dsq->nr <= nr_donor_target)
5110 break;
5111
5112 if (cpumask_empty(donee_mask))
5113 break;
5114
5115 /*
5116 * If an earlier pass placed @p on @donor_dsq from a different
5117 * CPU and the donee hasn't consumed it yet, @p is still on the
5118 * previous CPU and task_rq(@p) != @donor_rq. @p can't be moved
5119 * without its rq locked. Skip.
5120 */
5121 if (task_rq(p) != donor_rq)
5122 continue;
5123
5124 donee = cpumask_any_and_distribute(donee_mask, p->cpus_ptr);
5125 if (donee >= nr_cpu_ids)
5126 continue;
5127
5128 donee_dsq = bypass_dsq(sch, donee);
5129
5130 /*
5131 * $p's rq is not locked but $p's DSQ lock protects its
5132 * scheduling properties making this test safe.
5133 */
5134 if (!task_can_run_on_remote_rq(sch, p, cpu_rq(donee), false))
5135 continue;
5136
5137 /*
5138 * Moving $p from one non-local DSQ to another. The source rq
5139 * and DSQ are already locked. Do an abbreviated dequeue and
5140 * then perform enqueue without unlocking $donor_dsq.
5141 *
5142 * We don't want to drop and reacquire the lock on each
5143 * iteration as @donor_dsq can be very long and potentially
5144 * highly contended. Donee DSQs are less likely to be contended.
5145 * The nested locking is safe as only this LB moves tasks
5146 * between bypass DSQs.
5147 */
5148 dispatch_dequeue_locked(p, donor_dsq);
5149 dispatch_enqueue(sch, cpu_rq(donee), donee_dsq, p, SCX_ENQ_NESTED);
5150
5151 /*
5152 * $donee might have been idle and need to be woken up. No need
5153 * to be clever. Kick every CPU that receives tasks.
5154 */
5155 cpumask_set_cpu(donee, resched_mask);
5156
5157 if (READ_ONCE(donee_dsq->nr) >= nr_donee_target)
5158 cpumask_clear_cpu(donee, donee_mask);
5159
5160 nr_balanced++;
5161 if (!(nr_balanced % SCX_BYPASS_LB_BATCH) && n) {
5162 list_move_tail(&cursor.node, &n->scx.dsq_list.node);
5163 raw_spin_unlock(&donor_dsq->lock);
5164 raw_spin_rq_unlock_irq(donor_rq);
5165 cpu_relax();
5166 raw_spin_rq_lock_irq(donor_rq);
5167 raw_spin_lock(&donor_dsq->lock);
5168 goto resume;
5169 }
5170 }
5171
5172 list_del_init(&cursor.node);
5173 raw_spin_unlock(&donor_dsq->lock);
5174 raw_spin_rq_unlock_irq(donor_rq);
5175
5176 return nr_balanced;
5177 }
5178
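/*
 * Balance the per-CPU bypass DSQs within @node: derive the per-CPU target from
 * the total number of queued tasks, mark CPUs below the target as donees,
 * offload from CPUs above the donor threshold and resched the CPUs that
 * received tasks.
 */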
5179 static void bypass_lb_node(struct scx_sched *sch, int node)
5180 {
5181 const struct cpumask *node_mask = cpumask_of_node(node);
5182 struct cpumask *donee_mask = sch->bypass_lb_donee_cpumask;
5183 struct cpumask *resched_mask = sch->bypass_lb_resched_cpumask;
5184 u32 nr_tasks = 0, nr_cpus = 0, nr_balanced = 0;
5185 u32 nr_target, nr_donor_target;
5186 u32 before_min = U32_MAX, before_max = 0;
5187 u32 after_min = U32_MAX, after_max = 0;
5188 int cpu;
5189
5190 /* count the target tasks and CPUs */
5191 for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
5192 u32 nr = READ_ONCE(bypass_dsq(sch, cpu)->nr);
5193
5194 nr_tasks += nr;
5195 nr_cpus++;
5196
5197 before_min = min(nr, before_min);
5198 before_max = max(nr, before_max);
5199 }
5200
5201 if (!nr_cpus)
5202 return;
5203
5204 /*
5205 * We don't want CPUs to have more than $nr_donor_target tasks, and
5206 * balancing fills donee CPUs up to $nr_target. Once targets are
5207 * calculated, find the donee CPUs.
5208 */
5209 nr_target = DIV_ROUND_UP(nr_tasks, nr_cpus);
5210 nr_donor_target = DIV_ROUND_UP(nr_target * SCX_BYPASS_LB_DONOR_PCT, 100);
5211
5212 cpumask_clear(donee_mask);
5213 for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
5214 if (READ_ONCE(bypass_dsq(sch, cpu)->nr) < nr_target)
5215 cpumask_set_cpu(cpu, donee_mask);
5216 }
5217
5218 /* iterate !donee CPUs and see if they should be offloaded */
5219 cpumask_clear(resched_mask);
5220 for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
5221 if (cpumask_empty(donee_mask))
5222 break;
5223 if (cpumask_test_cpu(cpu, donee_mask))
5224 continue;
5225 if (READ_ONCE(bypass_dsq(sch, cpu)->nr) <= nr_donor_target)
5226 continue;
5227
5228 nr_balanced += bypass_lb_cpu(sch, cpu, donee_mask, resched_mask,
5229 nr_donor_target, nr_target);
5230 }
5231
5232 for_each_cpu(cpu, resched_mask)
5233 resched_cpu(cpu);
5234
5235 for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
5236 u32 nr = READ_ONCE(bypass_dsq(sch, cpu)->nr);
5237
5238 after_min = min(nr, after_min);
5239 after_max = max(nr, after_max);
5241 }
5242
5243 trace_sched_ext_bypass_lb(node, nr_cpus, nr_tasks, nr_balanced,
5244 before_min, before_max, after_min, after_max);
5245 }
5246
5247 /*
5248 * In bypass mode, all tasks are put on the per-CPU bypass DSQs. If the machine
5249 * is over-saturated and the BPF scheduler skewed tasks into few CPUs, some
5250 * bypass DSQs can be overloaded. If there are enough tasks to saturate other
5251 * lightly loaded CPUs, such imbalance can lead to very high execution latency
5252 * on the overloaded CPUs and thus to hung tasks and RCU stalls. To avoid such
5253 * outcomes, a simple load balancing mechanism is implemented by the following
5254 * timer which runs periodically while bypass mode is in effect.
5255 */
5256 static void scx_bypass_lb_timerfn(struct timer_list *timer)
5257 {
5258 struct scx_sched *sch = container_of(timer, struct scx_sched, bypass_lb_timer);
5259 int node;
5260 u32 intv_us;
5261
5262 if (!bypass_dsp_enabled(sch))
5263 return;
5264
5265 for_each_node_with_cpus(node)
5266 bypass_lb_node(sch, node);
5267
5268 intv_us = READ_ONCE(scx_bypass_lb_intv_us);
5269 if (intv_us)
5270 mod_timer(timer, jiffies + usecs_to_jiffies(intv_us));
5271 }
5272
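/*
 * inc/dec_bypass_depth() adjust @sch's bypass depth under scx_bypass_lock and
 * return true only on the 0<->1 transitions where bypass actually engages or
 * disengages. Engaging shortens the default slice to the bypass slice;
 * disengaging restores %SCX_SLICE_DFL and accounts the bypass duration.
 */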
5273 static bool inc_bypass_depth(struct scx_sched *sch)
5274 {
5275 lockdep_assert_held(&scx_bypass_lock);
5276
5277 WARN_ON_ONCE(sch->bypass_depth < 0);
5278 WRITE_ONCE(sch->bypass_depth, sch->bypass_depth + 1);
5279 if (sch->bypass_depth != 1)
5280 return false;
5281
5282 WRITE_ONCE(sch->slice_dfl, READ_ONCE(scx_slice_bypass_us) * NSEC_PER_USEC);
5283 sch->bypass_timestamp = ktime_get_ns();
5284 scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1);
5285 return true;
5286 }
5287
5288 static bool dec_bypass_depth(struct scx_sched *sch)
5289 {
5290 lockdep_assert_held(&scx_bypass_lock);
5291
5292 WARN_ON_ONCE(sch->bypass_depth < 1);
5293 WRITE_ONCE(sch->bypass_depth, sch->bypass_depth - 1);
5294 if (sch->bypass_depth != 0)
5295 return false;
5296
5297 WRITE_ONCE(sch->slice_dfl, SCX_SLICE_DFL);
5298 scx_add_event(sch, SCX_EV_BYPASS_DURATION,
5299 ktime_get_ns() - sch->bypass_timestamp);
5300 return true;
5301 }
5302
5303 static void enable_bypass_dsp(struct scx_sched *sch)
5304 {
5305 struct scx_sched *host = scx_parent(sch) ?: sch;
5306 u32 intv_us = READ_ONCE(scx_bypass_lb_intv_us);
5307 s32 ret;
5308
5309 /*
5310 * @sch->bypass_depth transitioning from 0 to 1 triggers enabling.
5311 * Shouldn't stagger.
5312 */
5313 if (WARN_ON_ONCE(test_and_set_bit(0, &sch->bypass_dsp_claim)))
5314 return;
5315
5316 /*
5317 * When a sub-sched bypasses, its tasks are queued on the bypass DSQs of
5318 * the nearest non-bypassing ancestor or root. As enable_bypass_dsp() is
5319 * called iff @sch is not already bypassed due to an ancestor bypassing,
5320 * we can assume that the parent is not bypassing and thus will be the
5321 * host of the bypass DSQs.
5322 *
5323 * While the situation may change in the future, the following
5324 * guarantees that the nearest non-bypassing ancestor or root has bypass
5325 * dispatch enabled while a descendant is bypassing, which is all that's
5326 * required.
5327 *
5328 * bypass_dsp_enabled() test is used to determine whether to enter the
5329 * bypass dispatch handling path from both bypassing and hosting scheds.
5330 * Bump enable depth on both @sch and bypass dispatch host.
5331 */
5332 ret = atomic_inc_return(&sch->bypass_dsp_enable_depth);
5333 WARN_ON_ONCE(ret <= 0);
5334
5335 if (host != sch) {
5336 ret = atomic_inc_return(&host->bypass_dsp_enable_depth);
5337 WARN_ON_ONCE(ret <= 0);
5338 }
5339
5340 /*
5341 * The LB timer will stop running if bypass dispatch is disabled. Start
5342 * after enabling bypass dispatch.
5343 */
5344 if (intv_us && !timer_pending(&host->bypass_lb_timer))
5345 mod_timer(&host->bypass_lb_timer,
5346 jiffies + usecs_to_jiffies(intv_us));
5347 }
5348
5349 /* may be called without holding scx_bypass_lock */
5350 static void disable_bypass_dsp(struct scx_sched *sch)
5351 {
5352 s32 ret;
5353
5354 if (!test_and_clear_bit(0, &sch->bypass_dsp_claim))
5355 return;
5356
5357 ret = atomic_dec_return(&sch->bypass_dsp_enable_depth);
5358 WARN_ON_ONCE(ret < 0);
5359
5360 if (scx_parent(sch)) {
5361 ret = atomic_dec_return(&scx_parent(sch)->bypass_dsp_enable_depth);
5362 WARN_ON_ONCE(ret < 0);
5363 }
5364 }
5365
5366 /**
5367 * scx_bypass - [Un]bypass scx_ops and guarantee forward progress
5368 * @sch: sched to bypass
5369 * @bypass: true for bypass, false for unbypass
5370 *
5371 * Bypassing guarantees that all runnable tasks make forward progress without
5372 * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
5373 * be held by tasks that the BPF scheduler is forgetting to run, which
5374 * unfortunately also excludes toggling the static branches.
5375 *
5376 * Let's work around by overriding a couple ops and modifying behaviors based on
5377 * the DISABLING state and then cycling the queued tasks through dequeue/enqueue
5378 * to force global FIFO scheduling.
5379 *
5380 * - ops.select_cpu() is ignored and the default select_cpu() is used.
5381 *
5382 * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
5383 * %SCX_OPS_ENQ_LAST is also ignored.
5384 *
5385 * - ops.dispatch() is ignored.
5386 *
5387 * - balance_one() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
5388 * can't be trusted. Whenever a tick triggers, the running task is rotated to
5389 * the tail of the queue with core_sched_at touched.
5390 *
5391 * - pick_next_task() suppresses zero slice warning.
5392 *
5393 * - scx_kick_cpu() is disabled to avoid irq_work malfunction during PM
5394 * operations.
5395 *
5396 * - scx_prio_less() reverts to the default core_sched_at order.
5397 */
5398 static void scx_bypass(struct scx_sched *sch, bool bypass)
5399 {
5400 struct scx_sched *pos;
5401 unsigned long flags;
5402 int cpu;
5403
5404 raw_spin_lock_irqsave(&scx_bypass_lock, flags);
5405
5406 if (bypass) {
5407 if (!inc_bypass_depth(sch))
5408 goto unlock;
5409
5410 enable_bypass_dsp(sch);
5411 } else {
5412 if (!dec_bypass_depth(sch))
5413 goto unlock;
5414 }
5415
5416 /*
5417 * Bypass state is propagated to all descendants - an scx_sched bypasses
5418 * if itself or any of its ancestors are in bypass mode.
5419 */
5420 raw_spin_lock(&scx_sched_lock);
5421 scx_for_each_descendant_pre(pos, sch) {
5422 if (pos == sch)
5423 continue;
5424 if (bypass)
5425 inc_bypass_depth(pos);
5426 else
5427 dec_bypass_depth(pos);
5428 }
5429 raw_spin_unlock(&scx_sched_lock);
5430
5431 /*
5432 * No task property is changing. We just need to make sure all currently
5433 * queued tasks are re-queued according to the new scx_bypassing()
5434 * state. As an optimization, walk each rq's runnable_list instead of
5435 * the scx_tasks list.
5436 *
5437 * This function can't trust the scheduler and thus can't use
5438 * cpus_read_lock(). Walk all possible CPUs instead of online.
5439 */
5440 for_each_possible_cpu(cpu) {
5441 struct rq *rq = cpu_rq(cpu);
5442 struct task_struct *p, *n;
5443
5444 raw_spin_rq_lock(rq);
5445 raw_spin_lock(&scx_sched_lock);
5446
5447 scx_for_each_descendant_pre(pos, sch) {
5448 struct scx_sched_pcpu *pcpu = per_cpu_ptr(pos->pcpu, cpu);
5449
5450 if (pos->bypass_depth)
5451 pcpu->flags |= SCX_SCHED_PCPU_BYPASSING;
5452 else
5453 pcpu->flags &= ~SCX_SCHED_PCPU_BYPASSING;
5454 }
5455
5456 raw_spin_unlock(&scx_sched_lock);
5457
5458 /*
5459 * We need to guarantee that no tasks are on the BPF scheduler
5460 * while bypassing. Either we see enabled or the enable path
5461 * sees scx_bypassing() before moving tasks to SCX.
5462 */
5463 if (!scx_enabled()) {
5464 raw_spin_rq_unlock(rq);
5465 continue;
5466 }
5467
5468 /*
5469 * The use of list_for_each_entry_safe_reverse() is required
5470 * because each task is going to be removed from and added back
5471 * to the runnable_list during iteration. Because they're added
5472 * to the tail of the list, safe reverse iteration can still
5473 * visit all nodes.
5474 */
5475 list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
5476 scx.runnable_node) {
5477 if (!scx_is_descendant(scx_task_sched(p), sch))
5478 continue;
5479
5480 /* cycling deq/enq is enough, see the function comment */
5481 scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_MOVE) {
5482 /* nothing */ ;
5483 }
5484 }
5485
5486 /* resched to restore ticks and idle state */
5487 if (cpu_online(cpu) || cpu == smp_processor_id())
5488 resched_curr(rq);
5489
5490 raw_spin_rq_unlock(rq);
5491 }
5492
5493 /* disarming must come after moving all tasks out of the bypass DSQs */
5494 if (!bypass)
5495 disable_bypass_dsp(sch);
5496 unlock:
5497 raw_spin_unlock_irqrestore(&scx_bypass_lock, flags);
5498 }
5499
5500 static void free_exit_info(struct scx_exit_info *ei)
5501 {
5502 kvfree(ei->dump);
5503 kfree(ei->msg);
5504 kfree(ei->bt);
5505 kfree(ei);
5506 }
5507
5508 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
5509 {
5510 struct scx_exit_info *ei;
5511
5512 ei = kzalloc_obj(*ei);
5513 if (!ei)
5514 return NULL;
5515
5516 ei->bt = kzalloc_objs(ei->bt[0], SCX_EXIT_BT_LEN);
5517 ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
5518 ei->dump = kvzalloc(exit_dump_len, GFP_KERNEL);
5519
5520 if (!ei->bt || !ei->msg || !ei->dump) {
5521 free_exit_info(ei);
5522 return NULL;
5523 }
5524
5525 return ei;
5526 }
5527
5528 static const char *scx_exit_reason(enum scx_exit_kind kind)
5529 {
5530 switch (kind) {
5531 case SCX_EXIT_UNREG:
5532 return "unregistered from user space";
5533 case SCX_EXIT_UNREG_BPF:
5534 return "unregistered from BPF";
5535 case SCX_EXIT_UNREG_KERN:
5536 return "unregistered from the main kernel";
5537 case SCX_EXIT_SYSRQ:
5538 return "disabled by sysrq-S";
5539 case SCX_EXIT_PARENT:
5540 return "parent exiting";
5541 case SCX_EXIT_ERROR:
5542 return "runtime error";
5543 case SCX_EXIT_ERROR_BPF:
5544 return "scx_bpf_error";
5545 case SCX_EXIT_ERROR_STALL:
5546 return "runnable task stall";
5547 default:
5548 return "<UNKNOWN>";
5549 }
5550 }
5551
5552 static void free_kick_syncs(void)
5553 {
5554 int cpu;
5555
5556 for_each_possible_cpu(cpu) {
5557 struct scx_kick_syncs **ksyncs = per_cpu_ptr(&scx_kick_syncs, cpu);
5558 struct scx_kick_syncs *to_free;
5559
5560 to_free = rcu_replace_pointer(*ksyncs, NULL, true);
5561 if (to_free)
5562 kvfree_rcu(to_free, rcu);
5563 }
5564 }
5565
5566 static void refresh_watchdog(void)
5567 {
5568 struct scx_sched *sch;
5569 unsigned long intv = ULONG_MAX;
5570
5571 /* take the shortest timeout and use its half for watchdog interval */
5572 rcu_read_lock();
5573 list_for_each_entry_rcu(sch, &scx_sched_all, all)
5574 intv = max(min(intv, sch->watchdog_timeout / 2), 1);
5575 rcu_read_unlock();
5576
5577 WRITE_ONCE(scx_watchdog_timestamp, jiffies);
5578 WRITE_ONCE(scx_watchdog_interval, intv);
5579
5580 if (intv < ULONG_MAX)
5581 mod_delayed_work(system_dfl_wq, &scx_watchdog_work, intv);
5582 else
5583 cancel_delayed_work_sync(&scx_watchdog_work);
5584 }
5585
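/*
 * Link @sch into the global scheduler list and, for sub-scheds, into the
 * parent's children list and the cgroup-id hash table, then refresh the shared
 * watchdog. Fails if the parent has already started exiting or the hash
 * insertion fails.
 */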
5586 static s32 scx_link_sched(struct scx_sched *sch)
5587 {
5588 scoped_guard(raw_spinlock_irq, &scx_sched_lock) {
5589 #ifdef CONFIG_EXT_SUB_SCHED
5590 struct scx_sched *parent = scx_parent(sch);
5591 s32 ret;
5592
5593 if (parent) {
5594 /*
5595 * scx_claim_exit() propagates exit_kind transition to
5596 * its sub-scheds while holding scx_sched_lock - either
5597 * we can see the parent's non-NONE exit_kind or the
5598 * parent can shoot us down.
5599 */
5600 if (atomic_read(&parent->exit_kind) != SCX_EXIT_NONE) {
5601 scx_error(sch, "parent disabled");
5602 return -ENOENT;
5603 }
5604
5605 ret = rhashtable_lookup_insert_fast(&scx_sched_hash,
5606 &sch->hash_node, scx_sched_hash_params);
5607 if (ret) {
5608 scx_error(sch, "failed to insert into scx_sched_hash (%d)", ret);
5609 return ret;
5610 }
5611
5612 list_add_tail(&sch->sibling, &parent->children);
5613 }
5614 #endif /* CONFIG_EXT_SUB_SCHED */
5615
5616 list_add_tail_rcu(&sch->all, &scx_sched_all);
5617 }
5618
5619 refresh_watchdog();
5620 return 0;
5621 }
5622
5623 static void scx_unlink_sched(struct scx_sched *sch)
5624 {
5625 scoped_guard(raw_spinlock_irq, &scx_sched_lock) {
5626 #ifdef CONFIG_EXT_SUB_SCHED
5627 if (scx_parent(sch)) {
5628 rhashtable_remove_fast(&scx_sched_hash, &sch->hash_node,
5629 scx_sched_hash_params);
5630 list_del_init(&sch->sibling);
5631 }
5632 #endif /* CONFIG_EXT_SUB_SCHED */
5633 list_del_rcu(&sch->all);
5634 }
5635
5636 refresh_watchdog();
5637 }
5638
5639 /*
5640 * Called to disable future dumps and wait for any in-progress one while
5641 * disabling @sch. Once @sch becomes empty during disable, there's no point in
5642 * dumping it. This prevents calling dump ops on a dead sch.
5643 */
5644 static void scx_disable_dump(struct scx_sched *sch)
5645 {
5646 guard(raw_spinlock_irqsave)(&scx_dump_lock);
5647 sch->dump_disabled = true;
5648 }
5649
5650 #ifdef CONFIG_EXT_SUB_SCHED
5651 static DECLARE_WAIT_QUEUE_HEAD(scx_unlink_waitq);
5652
5653 static void drain_descendants(struct scx_sched *sch)
5654 {
5655 /*
5656 * Child scheds that finished the critical part of disabling will take
5657 * themselves off @sch->children. Wait for it to drain. As propagation
5658 * is recursive, empty @sch->children means that all proper descendant
5659 * scheds reached unlinking stage.
5660 */
5661 wait_event(scx_unlink_waitq, list_empty(&sch->children));
5662 }
5663
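/*
 * ops.init_task() on the parent failed for @failed while @sch's tasks were
 * being handed over. The parent can't keep running either, so error it out,
 * put it into bypass mode, and move the remaining tasks over without
 * initializing them for the parent.
 */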
5664 static void scx_fail_parent(struct scx_sched *sch,
5665 struct task_struct *failed, s32 fail_code)
5666 {
5667 struct scx_sched *parent = scx_parent(sch);
5668 struct scx_task_iter sti;
5669 struct task_struct *p;
5670
5671 scx_error(parent, "ops.init_task() failed (%d) for %s[%d] while disabling a sub-scheduler",
5672 fail_code, failed->comm, failed->pid);
5673
5674 /*
5675 * Once $parent is bypassed, it's safe to put SCX_TASK_NONE tasks into
5676 * it. This may cause downstream failures on the BPF side but $parent is
5677 * dying anyway.
5678 */
5679 scx_bypass(parent, true);
5680
5681 scx_task_iter_start(&sti, sch->cgrp);
5682 while ((p = scx_task_iter_next_locked(&sti))) {
5683 if (scx_task_on_sched(parent, p))
5684 continue;
5685
5686 scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_MOVE) {
5687 scx_disable_and_exit_task(sch, p);
5688 rcu_assign_pointer(p->scx.sched, parent);
5689 }
5690 }
5691 scx_task_iter_stop(&sti);
5692 }
5693
5694 static void scx_sub_disable(struct scx_sched *sch)
5695 {
5696 struct scx_sched *parent = scx_parent(sch);
5697 struct scx_task_iter sti;
5698 struct task_struct *p;
5699 int ret;
5700
5701 /*
5702 * Guarantee forward progress and wait for descendants to be disabled.
5703 * To limit disruptions, $parent is not bypassed. Tasks are fully
5704 * prepped and then inserted back into $parent.
5705 */
5706 scx_bypass(sch, true);
5707 drain_descendants(sch);
5708
5709 /*
5710 * Here, every runnable task is guaranteed to make forward progress and
5711 * we can safely use blocking synchronization constructs. Actually
5712 * disable ops.
5713 */
5714 mutex_lock(&scx_enable_mutex);
5715 percpu_down_write(&scx_fork_rwsem);
5716 scx_cgroup_lock();
5717
5718 set_cgroup_sched(sch_cgroup(sch), parent);
5719
5720 scx_task_iter_start(&sti, sch->cgrp);
5721 while ((p = scx_task_iter_next_locked(&sti))) {
5722 struct rq *rq;
5723 struct rq_flags rf;
5724
5725 /* filter out duplicate visits */
5726 if (scx_task_on_sched(parent, p))
5727 continue;
5728
5729 /*
5730 * By the time control reaches here, all descendant schedulers
5731 * should already have been disabled.
5732 */
5733 WARN_ON_ONCE(!scx_task_on_sched(sch, p));
5734
5735 /*
5736 * If $p is about to be freed, nothing prevents $sch from
5737 * unloading before $p reaches sched_ext_free(). Disable and
5738 * exit $p right away.
5739 */
5740 if (!tryget_task_struct(p)) {
5741 scx_disable_and_exit_task(sch, p);
5742 continue;
5743 }
5744
5745 scx_task_iter_unlock(&sti);
5746
5747 /*
5748 * $p is READY or ENABLED on @sch. Initialize for $parent,
5749 * disable and exit from @sch, and then switch over to $parent.
5750 *
5751 * If a task fails to initialize for $parent, the only available
5752 * action is disabling $parent too. While this allows disabling
5753 * of a child sched to cause the parent scheduler to fail, the
5754 * failure can only originate from ops.init_task() of the
5755 * parent. A child can't directly affect the parent through its
5756 * own failures.
5757 */
5758 ret = __scx_init_task(parent, p, false);
5759 if (ret) {
5760 scx_fail_parent(sch, p, ret);
5761 put_task_struct(p);
5762 break;
5763 }
5764
5765 rq = task_rq_lock(p, &rf);
5766 scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_MOVE) {
5767 /*
5768 * $p is initialized for $parent and still attached to
5769 * @sch. Disable and exit for @sch, switch over to
5770 * $parent, override the state to READY to account for
5771 * $p having already been initialized, and then enable.
5772 */
5773 scx_disable_and_exit_task(sch, p);
5774 scx_set_task_state(p, SCX_TASK_INIT);
5775 rcu_assign_pointer(p->scx.sched, parent);
5776 scx_set_task_state(p, SCX_TASK_READY);
5777 scx_enable_task(parent, p);
5778 }
5779 task_rq_unlock(rq, p, &rf);
5780
5781 put_task_struct(p);
5782 }
5783 scx_task_iter_stop(&sti);
5784
5785 scx_disable_dump(sch);
5786
5787 scx_cgroup_unlock();
5788 percpu_up_write(&scx_fork_rwsem);
5789
5790 /*
5791 * All tasks are moved off of @sch but there may still be on-going
5792 * operations (e.g. ops.select_cpu()). Drain them by flushing RCU. Use
5793 * the expedited version as ancestors may be waiting in bypass mode.
5794 * Also, tell the parent that there is no need to keep running bypass
5795 * DSQs for us.
5796 */
5797 synchronize_rcu_expedited();
5798 disable_bypass_dsp(sch);
5799
5800 scx_unlink_sched(sch);
5801
5802 mutex_unlock(&scx_enable_mutex);
5803
5804 /*
5805 * @sch is now unlinked from the parent's children list. Notify and call
5806 * ops.sub_detach/exit(). Note that ops.sub_detach/exit() must be called
5807 * after unlinking and releasing all locks. See scx_claim_exit().
5808 */
5809 wake_up_all(&scx_unlink_waitq);
5810
5811 if (parent->ops.sub_detach && sch->sub_attached) {
5812 struct scx_sub_detach_args sub_detach_args = {
5813 .ops = &sch->ops,
5814 .cgroup_path = sch->cgrp_path,
5815 };
5816 SCX_CALL_OP(parent, sub_detach, NULL,
5817 &sub_detach_args);
5818 }
5819
5820 if (sch->ops.exit)
5821 SCX_CALL_OP(sch, exit, NULL, sch->exit_info);
5822 if (sch->sub_kset)
5823 kset_unregister(sch->sub_kset);
5824 kobject_del(&sch->kobj);
5825 }
5826 #else /* CONFIG_EXT_SUB_SCHED */
5827 static void drain_descendants(struct scx_sched *sch) { }
5828 static void scx_sub_disable(struct scx_sched *sch) { }
5829 #endif /* CONFIG_EXT_SUB_SCHED */
5830
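/*
 * Disable the root scheduler: engage bypass, wait for sub-scheds to drain,
 * shut down cgroup support, switch every task back to a non-ext sched_class,
 * turn off the static branches, report the exit reason, call ops.exit(), and
 * unlink and tear down @sch.
 */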
5831 static void scx_root_disable(struct scx_sched *sch)
5832 {
5833 struct scx_exit_info *ei = sch->exit_info;
5834 struct scx_task_iter sti;
5835 struct task_struct *p;
5836 int cpu;
5837
5838 /* guarantee forward progress and wait for descendants to be disabled */
5839 scx_bypass(sch, true);
5840 drain_descendants(sch);
5841
5842 switch (scx_set_enable_state(SCX_DISABLING)) {
5843 case SCX_DISABLING:
5844 WARN_ONCE(true, "sched_ext: duplicate disabling instance?");
5845 break;
5846 case SCX_DISABLED:
5847 pr_warn("sched_ext: ops error detected without ops (%s)\n",
5848 sch->exit_info->msg);
5849 WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING);
5850 goto done;
5851 default:
5852 break;
5853 }
5854
5855 /*
5856 * Here, every runnable task is guaranteed to make forward progress and
5857 * we can safely use blocking synchronization constructs. Actually
5858 * disable ops.
5859 */
5860 mutex_lock(&scx_enable_mutex);
5861
5862 static_branch_disable(&__scx_switched_all);
5863 WRITE_ONCE(scx_switching_all, false);
5864
5865 /*
5866 * Shut down cgroup support before tasks so that the cgroup attach path
5867 * doesn't race against scx_disable_and_exit_task().
5868 */
5869 scx_cgroup_lock();
5870 scx_cgroup_exit(sch);
5871 scx_cgroup_unlock();
5872
5873 /*
5874 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones
5875 * must be switched out and exited synchronously.
5876 */
5877 percpu_down_write(&scx_fork_rwsem);
5878
5879 scx_init_task_enabled = false;
5880
5881 scx_task_iter_start(&sti, NULL);
5882 while ((p = scx_task_iter_next_locked(&sti))) {
5883 unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
5884 const struct sched_class *old_class = p->sched_class;
5885 const struct sched_class *new_class = scx_setscheduler_class(p);
5886
5887 update_rq_clock(task_rq(p));
5888
5889 if (old_class != new_class)
5890 queue_flags |= DEQUEUE_CLASS;
5891
5892 scoped_guard (sched_change, p, queue_flags) {
5893 p->sched_class = new_class;
5894 }
5895
5896 scx_disable_and_exit_task(scx_task_sched(p), p);
5897 }
5898 scx_task_iter_stop(&sti);
5899
5900 scx_disable_dump(sch);
5901
5902 scx_cgroup_lock();
5903 set_cgroup_sched(sch_cgroup(sch), NULL);
5904 scx_cgroup_unlock();
5905
5906 percpu_up_write(&scx_fork_rwsem);
5907
5908 /*
5909 * Invalidate all the rq clocks to prevent getting outdated
5910 * rq clocks from a previous scx scheduler.
5911 */
5912 for_each_possible_cpu(cpu) {
5913 struct rq *rq = cpu_rq(cpu);
5914 scx_rq_clock_invalidate(rq);
5915 }
5916
5917 /* no task is on scx, turn off all the switches and flush in-progress calls */
5918 static_branch_disable(&__scx_enabled);
5919 bitmap_zero(sch->has_op, SCX_OPI_END);
5920 scx_idle_disable();
5921 synchronize_rcu();
5922
5923 if (ei->kind >= SCX_EXIT_ERROR) {
5924 pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
5925 sch->ops.name, ei->reason);
5926
5927 if (ei->msg[0] != '\0')
5928 pr_err("sched_ext: %s: %s\n", sch->ops.name, ei->msg);
5929 #ifdef CONFIG_STACKTRACE
5930 stack_trace_print(ei->bt, ei->bt_len, 2);
5931 #endif
5932 } else {
5933 pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
5934 sch->ops.name, ei->reason);
5935 }
5936
5937 if (sch->ops.exit)
5938 SCX_CALL_OP(sch, exit, NULL, ei);
5939
5940 scx_unlink_sched(sch);
5941
5942 /*
5943 * scx_root clearing must be inside cpus_read_lock(). See
5944 * handle_hotplug().
5945 */
5946 cpus_read_lock();
5947 RCU_INIT_POINTER(scx_root, NULL);
5948 cpus_read_unlock();
5949
5950 /*
5951 * Delete the kobject from the hierarchy synchronously. Otherwise, sysfs
5952 * could observe an object of the same name still in the hierarchy when
5953 * the next scheduler is loaded.
5954 */
5955 #ifdef CONFIG_EXT_SUB_SCHED
5956 if (sch->sub_kset)
5957 kset_unregister(sch->sub_kset);
5958 #endif
5959 kobject_del(&sch->kobj);
5960
5961 free_kick_syncs();
5962
5963 mutex_unlock(&scx_enable_mutex);
5964
5965 WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING);
5966 done:
5967 scx_bypass(sch, false);
5968 }
5969
5970 /*
5971 * Claim the exit on @sch. The caller must ensure that the helper kthread work
5972 * is kicked before the current task can be preempted. Once exit_kind is
5973 * claimed, scx_error() can no longer trigger, so if the current task gets
5974 * preempted and the BPF scheduler fails to schedule it back, the helper work
5975 * will never be kicked and the whole system can wedge.
5976 */
5977 static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind)
5978 {
5979 int none = SCX_EXIT_NONE;
5980
5981 lockdep_assert_preemption_disabled();
5982
5983 if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
5984 kind = SCX_EXIT_ERROR;
5985
5986 if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind))
5987 return false;
5988
5989 /*
5990 * Some CPUs may be trapped in the dispatch paths. Set the aborting
5991 * flag to break potential live-lock scenarios, ensuring we can
5992 * successfully reach scx_bypass().
5993 */
5994 WRITE_ONCE(sch->aborting, true);
5995
5996 /*
5997 * Propagate exits to descendants immediately. Each has a dedicated
5998 * helper kthread and can run in parallel. While most of disabling is
5999 * serialized, running them in separate threads allows parallelizing
6000 * ops.exit(), which can take arbitrarily long prolonging bypass mode.
6001 *
6002 * To guarantee forward progress, this propagation must be in-line so
6003 * that ->aborting is synchronously asserted for all sub-scheds. The
6004 * propagation is also the interlocking point against sub-sched
6005 * attachment. See scx_link_sched().
6006 *
6007 * This doesn't cause recursions as propagation only takes place for
6008 * non-propagation exits.
6009 */
6010 if (kind != SCX_EXIT_PARENT) {
6011 scoped_guard (raw_spinlock_irqsave, &scx_sched_lock) {
6012 struct scx_sched *pos;
6013 scx_for_each_descendant_pre(pos, sch)
6014 scx_disable(pos, SCX_EXIT_PARENT);
6015 }
6016 }
6017
6018 return true;
6019 }
6020
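/*
 * Kthread work which performs the actual disabling. Transitions exit_kind to
 * %SCX_EXIT_DONE, records the exit reason and hands off to scx_sub_disable()
 * or scx_root_disable() depending on whether @sch has a parent.
 */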
6021 static void scx_disable_workfn(struct kthread_work *work)
6022 {
6023 struct scx_sched *sch = container_of(work, struct scx_sched, disable_work);
6024 struct scx_exit_info *ei = sch->exit_info;
6025 int kind;
6026
6027 kind = atomic_read(&sch->exit_kind);
6028 while (true) {
6029 if (kind == SCX_EXIT_DONE) /* already disabled? */
6030 return;
6031 WARN_ON_ONCE(kind == SCX_EXIT_NONE);
6032 if (atomic_try_cmpxchg(&sch->exit_kind, &kind, SCX_EXIT_DONE))
6033 break;
6034 }
6035 ei->kind = kind;
6036 ei->reason = scx_exit_reason(ei->kind);
6037
6038 if (scx_parent(sch))
6039 scx_sub_disable(sch);
6040 else
6041 scx_root_disable(sch);
6042 }
6043
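/*
 * Initiate disabling of @sch with @kind as the exit reason. Only the exit
 * claim and the irq_work kick happen here; the rest runs from the dedicated
 * helper kthread. See scx_claim_exit().
 */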
6044 static void scx_disable(struct scx_sched *sch, enum scx_exit_kind kind)
6045 {
6046 guard(preempt)();
6047 if (scx_claim_exit(sch, kind))
6048 irq_work_queue(&sch->disable_irq_work);
6049 }
6050
6051 /**
6052 * scx_flush_disable_work - flush the disable work and wait for it to finish
6053 * @sch: the scheduler
6054 *
6055 * sch->disable_work might not be queued yet, which would make
6056 * kthread_flush_work() a noop. Syncing the irq_work first is required to
6057 * guarantee the kthread work has been queued before waiting for it.
6058 */
6059 static void scx_flush_disable_work(struct scx_sched *sch)
6060 {
6061 int kind;
6062
6063 do {
6064 irq_work_sync(&sch->disable_irq_work);
6065 kthread_flush_work(&sch->disable_work);
6066 kind = atomic_read(&sch->exit_kind);
6067 } while (kind != SCX_EXIT_NONE && kind != SCX_EXIT_DONE);
6068 }
6069
6070 static void dump_newline(struct seq_buf *s)
6071 {
6072 trace_sched_ext_dump("");
6073
6074 /* @s may be zero sized and seq_buf triggers WARN if so */
6075 if (s->size)
6076 seq_buf_putc(s, '\n');
6077 }
6078
6079 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
6080 {
6081 va_list args;
6082
6083 #ifdef CONFIG_TRACEPOINTS
6084 if (trace_sched_ext_dump_enabled()) {
6085 /* protected by scx_dump_lock */
6086 static char line_buf[SCX_EXIT_MSG_LEN];
6087
6088 va_start(args, fmt);
6089 vscnprintf(line_buf, sizeof(line_buf), fmt, args);
6090 va_end(args);
6091
6092 trace_sched_ext_dump(line_buf);
6093 }
6094 #endif
6095 /* @s may be zero sized and seq_buf triggers WARN if so */
6096 if (s->size) {
6097 va_start(args, fmt);
6098 seq_buf_vprintf(s, fmt, args);
6099 va_end(args);
6100
6101 seq_buf_putc(s, '\n');
6102 }
6103 }
6104
6105 static void dump_stack_trace(struct seq_buf *s, const char *prefix,
6106 const unsigned long *bt, unsigned int len)
6107 {
6108 unsigned int i;
6109
6110 for (i = 0; i < len; i++)
6111 dump_line(s, "%s%pS", prefix, (void *)bt[i]);
6112 }
6113
6114 static void ops_dump_init(struct seq_buf *s, const char *prefix)
6115 {
6116 struct scx_dump_data *dd = &scx_dump_data;
6117
6118 lockdep_assert_irqs_disabled();
6119
6120 dd->cpu = smp_processor_id(); /* allow scx_bpf_dump() */
6121 dd->first = true;
6122 dd->cursor = 0;
6123 dd->s = s;
6124 dd->prefix = prefix;
6125 }
6126
6127 static void ops_dump_flush(void)
6128 {
6129 struct scx_dump_data *dd = &scx_dump_data;
6130 char *line = dd->buf.line;
6131
6132 if (!dd->cursor)
6133 return;
6134
6135 /*
6136 * There's something to flush and this is the first line. Insert a blank
6137 * line to distinguish ops dump.
6138 */
6139 if (dd->first) {
6140 dump_newline(dd->s);
6141 dd->first = false;
6142 }
6143
6144 /*
6145 * There may be multiple lines in $line. Scan and emit each line
6146 * separately.
6147 */
6148 while (true) {
6149 char *end = line;
6150 char c;
6151
6152 while (*end != '\n' && *end != '\0')
6153 end++;
6154
6155 /*
6156 * If $line overflowed, it may not have newline at the end.
6157 * Always emit with a newline.
6158 */
6159 c = *end;
6160 *end = '\0';
6161 dump_line(dd->s, "%s%s", dd->prefix, line);
6162 if (c == '\0')
6163 break;
6164
6165 /* move to the next line */
6166 end++;
6167 if (*end == '\0')
6168 break;
6169 line = end;
6170 }
6171
6172 dd->cursor = 0;
6173 }
6174
6175 static void ops_dump_exit(void)
6176 {
6177 ops_dump_flush();
6178 scx_dump_data.cpu = -1;
6179 }
6180
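/*
 * Dump one task's sched_ext state into @s: owning scheduler, state and DSQ
 * flags, DSQ id, vtime/slice/weight, allowed CPUs and, when implemented,
 * ops.dump_task() output followed by a kernel backtrace.
 */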
6181 static void scx_dump_task(struct scx_sched *sch, struct seq_buf *s, struct scx_dump_ctx *dctx,
6182 struct rq *rq, struct task_struct *p, char marker)
6183 {
6184 static unsigned long bt[SCX_EXIT_BT_LEN];
6185 struct scx_sched *task_sch = scx_task_sched(p);
6186 const char *own_marker;
6187 char sch_id_buf[32];
6188 char dsq_id_buf[19] = "(n/a)";
6189 unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
6190 unsigned int bt_len = 0;
6191
6192 own_marker = task_sch == sch ? "*" : "";
6193
6194 if (task_sch->level == 0)
6195 scnprintf(sch_id_buf, sizeof(sch_id_buf), "root");
6196 else
6197 scnprintf(sch_id_buf, sizeof(sch_id_buf), "sub%d-%llu",
6198 task_sch->level, task_sch->ops.sub_cgroup_id);
6199
6200 if (p->scx.dsq)
6201 scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx",
6202 (unsigned long long)p->scx.dsq->id);
6203
6204 dump_newline(s);
6205 dump_line(s, " %c%c %s[%d] %s%s %+ldms",
6206 marker, task_state_to_char(p), p->comm, p->pid,
6207 own_marker, sch_id_buf,
6208 jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
6209 dump_line(s, " scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
6210 scx_get_task_state(p) >> SCX_TASK_STATE_SHIFT,
6211 p->scx.flags & ~SCX_TASK_STATE_MASK,
6212 p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
6213 ops_state >> SCX_OPSS_QSEQ_SHIFT);
6214 dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s",
6215 p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf);
6216 dump_line(s, " dsq_vtime=%llu slice=%llu weight=%u",
6217 p->scx.dsq_vtime, p->scx.slice, p->scx.weight);
6218 dump_line(s, " cpus=%*pb no_mig=%u", cpumask_pr_args(p->cpus_ptr),
6219 p->migration_disabled);
6220
6221 if (SCX_HAS_OP(sch, dump_task)) {
6222 ops_dump_init(s, " ");
6223 SCX_CALL_OP(sch, dump_task, rq, dctx, p);
6224 ops_dump_exit();
6225 }
6226
6227 #ifdef CONFIG_STACKTRACE
6228 bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1);
6229 #endif
6230 if (bt_len) {
6231 dump_newline(s);
6232 dump_stack_trace(s, " ", bt, bt_len);
6233 }
6234 }
6235
6236 /*
6237 * Dump scheduler state. If @dump_all_tasks is true, dump all tasks regardless
6238 * of which scheduler they belong to. If false, only dump tasks owned by @sch.
6239 * For SysRq-D dumps, @dump_all_tasks=false since all schedulers are dumped
6240 * separately. For error dumps, @dump_all_tasks=true since only the failing
6241 * scheduler is dumped.
6242 */
6243 static void scx_dump_state(struct scx_sched *sch, struct scx_exit_info *ei,
6244 size_t dump_len, bool dump_all_tasks)
6245 {
6246 static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
6247 struct scx_dump_ctx dctx = {
6248 .kind = ei->kind,
6249 .exit_code = ei->exit_code,
6250 .reason = ei->reason,
6251 .at_ns = ktime_get_ns(),
6252 .at_jiffies = jiffies,
6253 };
6254 struct seq_buf s;
6255 struct scx_event_stats events;
6256 char *buf;
6257 int cpu;
6258
6259 guard(raw_spinlock_irqsave)(&scx_dump_lock);
6260
6261 if (sch->dump_disabled)
6262 return;
6263
6264 seq_buf_init(&s, ei->dump, dump_len);
6265
6266 #ifdef CONFIG_EXT_SUB_SCHED
6267 if (sch->level == 0)
6268 dump_line(&s, "%s: root", sch->ops.name);
6269 else
6270 dump_line(&s, "%s: sub%d-%llu %s",
6271 sch->ops.name, sch->level, sch->ops.sub_cgroup_id,
6272 sch->cgrp_path);
6273 #endif
6274 if (ei->kind == SCX_EXIT_NONE) {
6275 dump_line(&s, "Debug dump triggered by %s", ei->reason);
6276 } else {
6277 dump_line(&s, "%s[%d] triggered exit kind %d:",
6278 current->comm, current->pid, ei->kind);
6279 dump_line(&s, " %s (%s)", ei->reason, ei->msg);
6280 dump_newline(&s);
6281 dump_line(&s, "Backtrace:");
6282 dump_stack_trace(&s, " ", ei->bt, ei->bt_len);
6283 }
6284
6285 if (SCX_HAS_OP(sch, dump)) {
6286 ops_dump_init(&s, "");
6287 SCX_CALL_OP(sch, dump, NULL, &dctx);
6288 ops_dump_exit();
6289 }
6290
6291 dump_newline(&s);
6292 dump_line(&s, "CPU states");
6293 dump_line(&s, "----------");
6294
6295 for_each_possible_cpu(cpu) {
6296 struct rq *rq = cpu_rq(cpu);
6297 struct rq_flags rf;
6298 struct task_struct *p;
6299 struct seq_buf ns;
6300 size_t avail, used;
6301 bool idle;
6302
6303 rq_lock_irqsave(rq, &rf);
6304
6305 idle = list_empty(&rq->scx.runnable_list) &&
6306 rq->curr->sched_class == &idle_sched_class;
6307
6308 if (idle && !SCX_HAS_OP(sch, dump_cpu))
6309 goto next;
6310
6311 /*
6312 * We don't yet know whether ops.dump_cpu() will produce output
6313 * and we may want to skip the default CPU dump if it doesn't.
6314 * Use a nested seq_buf to generate the standard dump so that we
6315 * can decide whether to commit later.
6316 */
6317 avail = seq_buf_get_buf(&s, &buf);
6318 seq_buf_init(&ns, buf, avail);
6319
6320 dump_newline(&ns);
6321 dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu ksync=%lu",
6322 cpu, rq->scx.nr_running, rq->scx.flags,
6323 rq->scx.cpu_released, rq->scx.ops_qseq,
6324 rq->scx.kick_sync);
6325 dump_line(&ns, " curr=%s[%d] class=%ps",
6326 rq->curr->comm, rq->curr->pid,
6327 rq->curr->sched_class);
6328 if (!cpumask_empty(rq->scx.cpus_to_kick))
6329 dump_line(&ns, " cpus_to_kick : %*pb",
6330 cpumask_pr_args(rq->scx.cpus_to_kick));
6331 if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
6332 dump_line(&ns, " idle_to_kick : %*pb",
6333 cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
6334 if (!cpumask_empty(rq->scx.cpus_to_preempt))
6335 dump_line(&ns, " cpus_to_preempt: %*pb",
6336 cpumask_pr_args(rq->scx.cpus_to_preempt));
6337 if (!cpumask_empty(rq->scx.cpus_to_wait))
6338 dump_line(&ns, " cpus_to_wait : %*pb",
6339 cpumask_pr_args(rq->scx.cpus_to_wait));
6340 if (!cpumask_empty(rq->scx.cpus_to_sync))
6341 dump_line(&ns, " cpus_to_sync : %*pb",
6342 cpumask_pr_args(rq->scx.cpus_to_sync));
6343
6344 used = seq_buf_used(&ns);
6345 if (SCX_HAS_OP(sch, dump_cpu)) {
6346 ops_dump_init(&ns, " ");
6347 SCX_CALL_OP(sch, dump_cpu, rq, &dctx, cpu, idle);
6348 ops_dump_exit();
6349 }
6350
6351 /*
6352 * If idle && nothing generated by ops.dump_cpu(), there's
6353 * nothing interesting. Skip.
6354 */
6355 if (idle && used == seq_buf_used(&ns))
6356 goto next;
6357
6358 /*
6359 * $s may already have overflowed when $ns was created. If so,
6360 * calling commit on it will trigger BUG.
6361 */
6362 if (avail) {
6363 seq_buf_commit(&s, seq_buf_used(&ns));
6364 if (seq_buf_has_overflowed(&ns))
6365 seq_buf_set_overflow(&s);
6366 }
6367
6368 if (rq->curr->sched_class == &ext_sched_class &&
6369 (dump_all_tasks || scx_task_on_sched(sch, rq->curr)))
6370 scx_dump_task(sch, &s, &dctx, rq, rq->curr, '*');
6371
6372 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
6373 if (dump_all_tasks || scx_task_on_sched(sch, p))
6374 scx_dump_task(sch, &s, &dctx, rq, p, ' ');
6375 next:
6376 rq_unlock_irqrestore(rq, &rf);
6377 }
6378
6379 dump_newline(&s);
6380 dump_line(&s, "Event counters");
6381 dump_line(&s, "--------------");
6382
6383 scx_read_events(sch, &events);
6384 scx_dump_event(s, &events, SCX_EV_SELECT_CPU_FALLBACK);
6385 scx_dump_event(s, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
6386 scx_dump_event(s, &events, SCX_EV_DISPATCH_KEEP_LAST);
6387 scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_EXITING);
6388 scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
6389 scx_dump_event(s, &events, SCX_EV_REENQ_IMMED);
6390 scx_dump_event(s, &events, SCX_EV_REENQ_LOCAL_REPEAT);
6391 scx_dump_event(s, &events, SCX_EV_REFILL_SLICE_DFL);
6392 scx_dump_event(s, &events, SCX_EV_BYPASS_DURATION);
6393 scx_dump_event(s, &events, SCX_EV_BYPASS_DISPATCH);
6394 scx_dump_event(s, &events, SCX_EV_BYPASS_ACTIVATE);
6395 scx_dump_event(s, &events, SCX_EV_INSERT_NOT_OWNED);
6396 scx_dump_event(s, &events, SCX_EV_SUB_BYPASS_DISPATCH);
6397
6398 if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
6399 memcpy(ei->dump + dump_len - sizeof(trunc_marker),
6400 trunc_marker, sizeof(trunc_marker));
6401 }
6402
6403 static void scx_disable_irq_workfn(struct irq_work *irq_work)
6404 {
6405 struct scx_sched *sch = container_of(irq_work, struct scx_sched, disable_irq_work);
6406 struct scx_exit_info *ei = sch->exit_info;
6407
6408 if (ei->kind >= SCX_EXIT_ERROR)
6409 scx_dump_state(sch, ei, sch->ops.exit_dump_len, true);
6410
6411 kthread_queue_work(sch->helper, &sch->disable_work);
6412 }
6413
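/*
 * Claim the exit on @sch and record @kind, @exit_code, the formatted message
 * and, for error exits, a backtrace in the exit info before queueing the
 * disable irq_work. Returns %false if someone else already claimed the exit.
 */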
6414 static bool scx_vexit(struct scx_sched *sch,
6415 enum scx_exit_kind kind, s64 exit_code,
6416 const char *fmt, va_list args)
6417 {
6418 struct scx_exit_info *ei = sch->exit_info;
6419
6420 guard(preempt)();
6421
6422 if (!scx_claim_exit(sch, kind))
6423 return false;
6424
6425 ei->exit_code = exit_code;
6426 #ifdef CONFIG_STACKTRACE
6427 if (kind >= SCX_EXIT_ERROR)
6428 ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);
6429 #endif
6430 vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
6431
6432 /*
6433 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again
6434 * in scx_disable_workfn().
6435 */
6436 ei->kind = kind;
6437 ei->reason = scx_exit_reason(ei->kind);
6438
6439 irq_work_queue(&sch->disable_irq_work);
6440 return true;
6441 }
6442
6443 static int alloc_kick_syncs(void)
6444 {
6445 int cpu;
6446
6447 /*
6448 * Allocate per-CPU arrays sized by nr_cpu_ids. Use kvzalloc as size
6449 * can exceed percpu allocator limits on large machines.
6450 */
6451 for_each_possible_cpu(cpu) {
6452 struct scx_kick_syncs **ksyncs = per_cpu_ptr(&scx_kick_syncs, cpu);
6453 struct scx_kick_syncs *new_ksyncs;
6454
6455 WARN_ON_ONCE(rcu_access_pointer(*ksyncs));
6456
6457 new_ksyncs = kvzalloc_node(struct_size(new_ksyncs, syncs, nr_cpu_ids),
6458 GFP_KERNEL, cpu_to_node(cpu));
6459 if (!new_ksyncs) {
6460 free_kick_syncs();
6461 return -ENOMEM;
6462 }
6463
6464 rcu_assign_pointer(*ksyncs, new_ksyncs);
6465 }
6466
6467 return 0;
6468 }
6469
6470 static void free_pnode(struct scx_sched_pnode *pnode)
6471 {
6472 if (!pnode)
6473 return;
6474 exit_dsq(&pnode->global_dsq);
6475 kfree(pnode);
6476 }
6477
6478 static struct scx_sched_pnode *alloc_pnode(struct scx_sched *sch, int node)
6479 {
6480 struct scx_sched_pnode *pnode;
6481
6482 pnode = kzalloc_node(sizeof(*pnode), GFP_KERNEL, node);
6483 if (!pnode)
6484 return NULL;
6485
6486 if (init_dsq(&pnode->global_dsq, SCX_DSQ_GLOBAL, sch)) {
6487 kfree(pnode);
6488 return NULL;
6489 }
6490
6491 return pnode;
6492 }
6493
6494 /*
6495 * Allocate and initialize a new scx_sched. @cgrp's reference is always
6496 * consumed whether the function succeeds or fails.
6497 */
6498 static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops,
6499 struct cgroup *cgrp,
6500 struct scx_sched *parent)
6501 {
6502 struct scx_sched *sch;
6503 s32 level = parent ? parent->level + 1 : 0;
6504 s32 node, cpu, ret, bypass_fail_cpu = nr_cpu_ids;
6505
6506 sch = kzalloc_flex(*sch, ancestors, level + 1);
6507 if (!sch) {
6508 ret = -ENOMEM;
6509 goto err_put_cgrp;
6510 }
6511
6512 sch->exit_info = alloc_exit_info(ops->exit_dump_len);
6513 if (!sch->exit_info) {
6514 ret = -ENOMEM;
6515 goto err_free_sch;
6516 }
6517
6518 ret = rhashtable_init(&sch->dsq_hash, &dsq_hash_params);
6519 if (ret < 0)
6520 goto err_free_ei;
6521
6522 sch->pnode = kzalloc_objs(sch->pnode[0], nr_node_ids);
6523 if (!sch->pnode) {
6524 ret = -ENOMEM;
6525 goto err_free_hash;
6526 }
6527
6528 for_each_node_state(node, N_POSSIBLE) {
6529 sch->pnode[node] = alloc_pnode(sch, node);
6530 if (!sch->pnode[node]) {
6531 ret = -ENOMEM;
6532 goto err_free_pnode;
6533 }
6534 }
6535
6536 sch->dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
6537 sch->pcpu = __alloc_percpu(struct_size_t(struct scx_sched_pcpu,
6538 dsp_ctx.buf, sch->dsp_max_batch),
6539 __alignof__(struct scx_sched_pcpu));
6540 if (!sch->pcpu) {
6541 ret = -ENOMEM;
6542 goto err_free_pnode;
6543 }
6544
6545 for_each_possible_cpu(cpu) {
6546 ret = init_dsq(bypass_dsq(sch, cpu), SCX_DSQ_BYPASS, sch);
6547 if (ret) {
6548 bypass_fail_cpu = cpu;
6549 goto err_free_pcpu;
6550 }
6551 }
6552
6553 for_each_possible_cpu(cpu) {
6554 struct scx_sched_pcpu *pcpu = per_cpu_ptr(sch->pcpu, cpu);
6555
6556 pcpu->sch = sch;
6557 INIT_LIST_HEAD(&pcpu->deferred_reenq_local.node);
6558 }
6559
6560 sch->helper = kthread_run_worker(0, "sched_ext_helper");
6561 if (IS_ERR(sch->helper)) {
6562 ret = PTR_ERR(sch->helper);
6563 goto err_free_pcpu;
6564 }
6565
6566 sched_set_fifo(sch->helper->task);
6567
6568 if (parent)
6569 memcpy(sch->ancestors, parent->ancestors,
6570 level * sizeof(parent->ancestors[0]));
6571 sch->ancestors[level] = sch;
6572 sch->level = level;
6573
6574 if (ops->timeout_ms)
6575 sch->watchdog_timeout = msecs_to_jiffies(ops->timeout_ms);
6576 else
6577 sch->watchdog_timeout = SCX_WATCHDOG_MAX_TIMEOUT;
6578
6579 sch->slice_dfl = SCX_SLICE_DFL;
6580 atomic_set(&sch->exit_kind, SCX_EXIT_NONE);
6581 init_irq_work(&sch->disable_irq_work, scx_disable_irq_workfn);
6582 kthread_init_work(&sch->disable_work, scx_disable_workfn);
6583 timer_setup(&sch->bypass_lb_timer, scx_bypass_lb_timerfn, 0);
6584
6585 if (!alloc_cpumask_var(&sch->bypass_lb_donee_cpumask, GFP_KERNEL)) {
6586 ret = -ENOMEM;
6587 goto err_stop_helper;
6588 }
6589 if (!alloc_cpumask_var(&sch->bypass_lb_resched_cpumask, GFP_KERNEL)) {
6590 ret = -ENOMEM;
6591 goto err_free_lb_cpumask;
6592 }
6593 sch->ops = *ops;
6594 rcu_assign_pointer(ops->priv, sch);
6595
6596 sch->kobj.kset = scx_kset;
6597
6598 #ifdef CONFIG_EXT_SUB_SCHED
6599 char *buf = kzalloc(PATH_MAX, GFP_KERNEL);
6600 if (!buf) {
6601 ret = -ENOMEM;
6602 goto err_free_lb_resched;
6603 }
6604 cgroup_path(cgrp, buf, PATH_MAX);
6605 sch->cgrp_path = kstrdup(buf, GFP_KERNEL);
6606 kfree(buf);
6607 if (!sch->cgrp_path) {
6608 ret = -ENOMEM;
6609 goto err_free_lb_resched;
6610 }
6611
6612 sch->cgrp = cgrp;
6613 INIT_LIST_HEAD(&sch->children);
6614 INIT_LIST_HEAD(&sch->sibling);
6615
6616 if (parent)
6617 ret = kobject_init_and_add(&sch->kobj, &scx_ktype,
6618 &parent->sub_kset->kobj,
6619 "sub-%llu", cgroup_id(cgrp));
6620 else
6621 ret = kobject_init_and_add(&sch->kobj, &scx_ktype, NULL, "root");
6622
6623 if (ret < 0) {
6624 kobject_put(&sch->kobj);
6625 return ERR_PTR(ret);
6626 }
6627
6628 if (ops->sub_attach) {
6629 sch->sub_kset = kset_create_and_add("sub", NULL, &sch->kobj);
6630 if (!sch->sub_kset) {
6631 kobject_put(&sch->kobj);
6632 return ERR_PTR(-ENOMEM);
6633 }
6634 }
6635 #else /* CONFIG_EXT_SUB_SCHED */
6636 ret = kobject_init_and_add(&sch->kobj, &scx_ktype, NULL, "root");
6637 if (ret < 0) {
6638 kobject_put(&sch->kobj);
6639 return ERR_PTR(ret);
6640 }
6641 #endif /* CONFIG_EXT_SUB_SCHED */
6642 return sch;
6643
6644 err_free_lb_resched:
6645 free_cpumask_var(sch->bypass_lb_resched_cpumask);
6646 err_free_lb_cpumask:
6647 free_cpumask_var(sch->bypass_lb_donee_cpumask);
6648 err_stop_helper:
6649 kthread_destroy_worker(sch->helper);
6650 err_free_pcpu:
6651 for_each_possible_cpu(cpu) {
6652 if (cpu == bypass_fail_cpu)
6653 break;
6654 exit_dsq(bypass_dsq(sch, cpu));
6655 }
6656 free_percpu(sch->pcpu);
6657 err_free_pnode:
6658 for_each_node_state(node, N_POSSIBLE)
6659 free_pnode(sch->pnode[node]);
6660 kfree(sch->pnode);
6661 err_free_hash:
6662 rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL);
6663 err_free_ei:
6664 free_exit_info(sch->exit_info);
6665 err_free_sch:
6666 kfree(sch);
6667 err_put_cgrp:
6668 #ifdef CONFIG_EXT_SUB_SCHED
6669 cgroup_put(cgrp);
6670 #endif
6671 return ERR_PTR(ret);
6672 }
6673
6674 static int check_hotplug_seq(struct scx_sched *sch,
6675 const struct sched_ext_ops *ops)
6676 {
6677 unsigned long long global_hotplug_seq;
6678
6679 /*
6680 * If a hotplug event has occurred between when a scheduler was
6681 * initialized, and when we were able to attach, exit and notify user
6682 * space about it.
6683 */
6684 if (ops->hotplug_seq) {
6685 global_hotplug_seq = atomic_long_read(&scx_hotplug_seq);
6686 if (ops->hotplug_seq != global_hotplug_seq) {
6687 scx_exit(sch, SCX_EXIT_UNREG_KERN,
6688 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
6689 "expected hotplug seq %llu did not match actual %llu",
6690 ops->hotplug_seq, global_hotplug_seq);
6691 return -EBUSY;
6692 }
6693 }
6694
6695 return 0;
6696 }
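/*
 * Illustrative sketch of the user-space side this check pairs with: a loader
 * can sample the current hotplug sequence from sysfs and store it in
 * ops->hotplug_seq before loading the struct_ops map, so that a CPU hotplug
 * event racing with scheduler load is caught above. The sysfs file is the one
 * published through scx_kset; the skeleton field names below are assumptions
 * for the example only.
 *
 *	unsigned long long seq = 0;
 *	FILE *f = fopen("/sys/kernel/sched_ext/hotplug_seq", "r");
 *
 *	if (f) {
 *		if (fscanf(f, "%llu", &seq) != 1)
 *			seq = 0;
 *		fclose(f);
 *	}
 *	skel->struct_ops.example_ops->hotplug_seq = seq;
 */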
6697
6698 static int validate_ops(struct scx_sched *sch, const struct sched_ext_ops *ops)
6699 {
6700 /*
6701 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the
6702 * ops.enqueue() callback isn't implemented.
6703 */
6704 if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {
6705 scx_error(sch, "SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
6706 return -EINVAL;
6707 }
6708
6709 /*
6710 * SCX_OPS_BUILTIN_IDLE_PER_NODE requires built-in CPU idle
6711 * selection policy to be enabled.
6712 */
6713 if ((ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE) &&
6714 (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))) {
6715 scx_error(sch, "SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled");
6716 return -EINVAL;
6717 }
6718
6719 if (ops->cpu_acquire || ops->cpu_release)
6720 pr_warn("ops->cpu_acquire/release() are deprecated, use sched_switch TP instead\n");
6721
6722 return 0;
6723 }
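/*
 * For illustration, on the BPF side a scheduler that sets %SCX_OPS_ENQ_LAST
 * must also wire up ops.enqueue() or the validation above rejects it. A
 * minimal sketch, assuming the usual scx tooling environment (vmlinux.h plus
 * scx/common.bpf.h) and a hypothetical example_enqueue callback:
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops example_ops = {
 *		.enqueue	= (void *)example_enqueue,
 *		.flags		= SCX_OPS_ENQ_LAST,
 *		.name		= "example",
 *	};
 */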
6724
6725 /*
6726 * scx_enable() is offloaded to a dedicated system-wide RT kthread to avoid
6727 * starvation. During the READY -> ENABLED task switching loop, the calling
6728 * thread's sched_class gets switched from fair to ext. As fair has higher
6729 * priority than ext, the calling thread can be indefinitely starved under
6730 * fair-class saturation, leading to a system hang.
6731 */
6732 struct scx_enable_cmd {
6733 struct kthread_work work;
6734 struct sched_ext_ops *ops;
6735 int ret;
6736 };
6737
6738 static void scx_root_enable_workfn(struct kthread_work *work)
6739 {
6740 struct scx_enable_cmd *cmd = container_of(work, struct scx_enable_cmd, work);
6741 struct sched_ext_ops *ops = cmd->ops;
6742 struct cgroup *cgrp = root_cgroup();
6743 struct scx_sched *sch;
6744 struct scx_task_iter sti;
6745 struct task_struct *p;
6746 int i, cpu, ret;
6747
6748 mutex_lock(&scx_enable_mutex);
6749
6750 if (scx_enable_state() != SCX_DISABLED) {
6751 ret = -EBUSY;
6752 goto err_unlock;
6753 }
6754
6755 ret = alloc_kick_syncs();
6756 if (ret)
6757 goto err_unlock;
6758
6759 #ifdef CONFIG_EXT_SUB_SCHED
6760 cgroup_get(cgrp);
6761 #endif
6762 sch = scx_alloc_and_add_sched(ops, cgrp, NULL);
6763 if (IS_ERR(sch)) {
6764 ret = PTR_ERR(sch);
6765 goto err_free_ksyncs;
6766 }
6767
6768 /*
6769 * Transition to ENABLING and clear exit info to arm the disable path.
6770 * Failure triggers full disabling from here on.
6771 */
6772 WARN_ON_ONCE(scx_set_enable_state(SCX_ENABLING) != SCX_DISABLED);
6773 WARN_ON_ONCE(scx_root);
6774
6775 atomic_long_set(&scx_nr_rejected, 0);
6776
6777 for_each_possible_cpu(cpu) {
6778 struct rq *rq = cpu_rq(cpu);
6779
6780 rq->scx.local_dsq.sched = sch;
6781 rq->scx.cpuperf_target = SCX_CPUPERF_ONE;
6782 }
6783
6784 /*
6785 * Keep CPUs stable during enable so that the BPF scheduler can track
6786 * online CPUs by watching ->on/offline_cpu() after ->init().
6787 */
6788 cpus_read_lock();
6789
6790 /*
6791 * Make the scheduler instance visible. Must be inside cpus_read_lock().
6792 * See handle_hotplug().
6793 */
6794 rcu_assign_pointer(scx_root, sch);
6795
6796 ret = scx_link_sched(sch);
6797 if (ret) {
6798 cpus_read_unlock();
6799 goto err_disable;
6800 }
6801
6802 scx_idle_enable(ops);
6803
6804 if (sch->ops.init) {
6805 ret = SCX_CALL_OP_RET(sch, init, NULL);
6806 if (ret) {
6807 ret = ops_sanitize_err(sch, "init", ret);
6808 cpus_read_unlock();
6809 scx_error(sch, "ops.init() failed (%d)", ret);
6810 goto err_disable;
6811 }
6812 sch->exit_info->flags |= SCX_EFLAG_INITIALIZED;
6813 }
6814
6815 for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
6816 if (((void (**)(void))ops)[i])
6817 set_bit(i, sch->has_op);
6818
6819 ret = check_hotplug_seq(sch, ops);
6820 if (ret) {
6821 cpus_read_unlock();
6822 goto err_disable;
6823 }
6824 scx_idle_update_selcpu_topology(ops);
6825
6826 cpus_read_unlock();
6827
6828 ret = validate_ops(sch, ops);
6829 if (ret)
6830 goto err_disable;
6831
6832 /*
6833 * Once __scx_enabled is set, %current can be switched to SCX anytime.
6834 * This can lead to stalls as some BPF schedulers (e.g. userspace
6835 * scheduling) may not function correctly before all tasks are switched.
6836 * Init in bypass mode to guarantee forward progress.
6837 */
6838 scx_bypass(sch, true);
6839
6840 for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
6841 if (((void (**)(void))ops)[i])
6842 set_bit(i, sch->has_op);
6843
6844 if (sch->ops.cpu_acquire || sch->ops.cpu_release)
6845 sch->ops.flags |= SCX_OPS_HAS_CPU_PREEMPT;
6846
6847 /*
6848 * Lock out forks, cgroup on/offlining and moves before opening the
6849 * floodgate so that they don't wander into the operations prematurely.
6850 */
6851 percpu_down_write(&scx_fork_rwsem);
6852
6853 WARN_ON_ONCE(scx_init_task_enabled);
6854 scx_init_task_enabled = true;
6855
6856 /*
6857 * Enable ops for every task. Fork is excluded by scx_fork_rwsem
6858 * preventing new tasks from being added. No need to exclude tasks
6859 * leaving as sched_ext_free() can handle both prepped and enabled
6860 * tasks. Prep all tasks first and then enable them with preemption
6861 * disabled.
6862 *
6863 * All cgroups should be initialized before scx_init_task() so that the
6864 * BPF scheduler can reliably track each task's cgroup membership from
6865 * scx_init_task(). Lock out cgroup on/offlining and task migrations
6866 * while tasks are being initialized so that scx_cgroup_can_attach()
6867 * never sees uninitialized tasks.
6868 */
6869 scx_cgroup_lock();
6870 set_cgroup_sched(sch_cgroup(sch), sch);
6871 ret = scx_cgroup_init(sch);
6872 if (ret)
6873 goto err_disable_unlock_all;
6874
6875 scx_task_iter_start(&sti, NULL);
6876 while ((p = scx_task_iter_next_locked(&sti))) {
6877 /*
6878 * @p may already be dead, have lost all its usage counts and
6879 * be waiting for RCU grace period before being freed. @p can't
6880 * be initialized for SCX in such cases and should be ignored.
6881 */
6882 if (!tryget_task_struct(p))
6883 continue;
6884
6885 scx_task_iter_unlock(&sti);
6886
6887 ret = scx_init_task(sch, p, false);
6888 if (ret) {
6889 put_task_struct(p);
6890 scx_task_iter_stop(&sti);
6891 scx_error(sch, "ops.init_task() failed (%d) for %s[%d]",
6892 ret, p->comm, p->pid);
6893 goto err_disable_unlock_all;
6894 }
6895
6896 scx_set_task_sched(p, sch);
6897 scx_set_task_state(p, SCX_TASK_READY);
6898
6899 put_task_struct(p);
6900 }
6901 scx_task_iter_stop(&sti);
6902 scx_cgroup_unlock();
6903 percpu_up_write(&scx_fork_rwsem);
6904
6905 /*
6906 * All tasks are READY. It's safe to turn on scx_enabled() and switch
6907 * all eligible tasks.
6908 */
6909 WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
6910 static_branch_enable(&__scx_enabled);
6911
6912 /*
6913 * We're fully committed and can't fail. The task READY -> ENABLED
6914 * transitions here are synchronized against sched_ext_free() through
6915 * scx_tasks_lock.
6916 */
6917 percpu_down_write(&scx_fork_rwsem);
6918 scx_task_iter_start(&sti, NULL);
6919 while ((p = scx_task_iter_next_locked(&sti))) {
6920 unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
6921 const struct sched_class *old_class = p->sched_class;
6922 const struct sched_class *new_class = scx_setscheduler_class(p);
6923
6924 if (scx_get_task_state(p) != SCX_TASK_READY)
6925 continue;
6926
6927 if (old_class != new_class)
6928 queue_flags |= DEQUEUE_CLASS;
6929
6930 scoped_guard (sched_change, p, queue_flags) {
6931 p->scx.slice = READ_ONCE(sch->slice_dfl);
6932 p->sched_class = new_class;
6933 }
6934 }
6935 scx_task_iter_stop(&sti);
6936 percpu_up_write(&scx_fork_rwsem);
6937
6938 scx_bypass(sch, false);
6939
6940 if (!scx_tryset_enable_state(SCX_ENABLED, SCX_ENABLING)) {
6941 WARN_ON_ONCE(atomic_read(&sch->exit_kind) == SCX_EXIT_NONE);
6942 goto err_disable;
6943 }
6944
6945 if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
6946 static_branch_enable(&__scx_switched_all);
6947
6948 pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
6949 sch->ops.name, scx_switched_all() ? "" : " (partial)");
6950 kobject_uevent(&sch->kobj, KOBJ_ADD);
6951 mutex_unlock(&scx_enable_mutex);
6952
6953 atomic_long_inc(&scx_enable_seq);
6954
6955 cmd->ret = 0;
6956 return;
6957
6958 err_free_ksyncs:
6959 free_kick_syncs();
6960 err_unlock:
6961 mutex_unlock(&scx_enable_mutex);
6962 cmd->ret = ret;
6963 return;
6964
6965 err_disable_unlock_all:
6966 scx_cgroup_unlock();
6967 percpu_up_write(&scx_fork_rwsem);
6968 /* we'll soon enter disable path, keep bypass on */
6969 err_disable:
6970 mutex_unlock(&scx_enable_mutex);
6971 /*
6972 * Returning an error code here would not pass all the error information
6973 * to userspace. Record errno using scx_error() for cases scx_error()
6974 * wasn't already invoked and exit indicating success so that the error
6975 * is notified through ops.exit() with all the details.
6976 *
6977 * Flush scx_disable_work to ensure that error is reported before init
6978 * completion. sch's base reference will be put by bpf_scx_unreg().
6979 */
6980 scx_error(sch, "scx_root_enable() failed (%d)", ret);
6981 scx_flush_disable_work(sch);
6982 cmd->ret = 0;
6983 }
6984
6985 #ifdef CONFIG_EXT_SUB_SCHED
6986 /* verify that a scheduler can be attached to @cgrp and return the parent */
6987 static struct scx_sched *find_parent_sched(struct cgroup *cgrp)
6988 {
6989 struct scx_sched *parent = cgrp->scx_sched;
6990 struct scx_sched *pos;
6991
6992 lockdep_assert_held(&scx_sched_lock);
6993
6994 /* can't attach twice to the same cgroup */
6995 if (parent->cgrp == cgrp)
6996 return ERR_PTR(-EBUSY);
6997
6998 /* does $parent allow sub-scheds? */
6999 if (!parent->ops.sub_attach)
7000 return ERR_PTR(-EOPNOTSUPP);
7001
7002 /* can't insert between $parent and its existing children */
7003 list_for_each_entry(pos, &parent->children, sibling)
7004 if (cgroup_is_descendant(pos->cgrp, cgrp))
7005 return ERR_PTR(-EBUSY);
7006
7007 return parent;
7008 }
7009
7010 static bool assert_task_ready_or_enabled(struct task_struct *p)
7011 {
7012 u32 state = scx_get_task_state(p);
7013
7014 switch (state) {
7015 case SCX_TASK_READY:
7016 case SCX_TASK_ENABLED:
7017 return true;
7018 default:
7019 WARN_ONCE(true, "sched_ext: Invalid task state %d for %s[%d] while enabling sub-sched",
7020 state, p->comm, p->pid);
7021 return false;
7022 }
7023 }
7024
7025 static void scx_sub_enable_workfn(struct kthread_work *work)
7026 {
7027 struct scx_enable_cmd *cmd = container_of(work, struct scx_enable_cmd, work);
7028 struct sched_ext_ops *ops = cmd->ops;
7029 struct cgroup *cgrp;
7030 struct scx_sched *parent, *sch;
7031 struct scx_task_iter sti;
7032 struct task_struct *p;
7033 s32 i, ret;
7034
7035 mutex_lock(&scx_enable_mutex);
7036
7037 if (!scx_enabled()) {
7038 ret = -ENODEV;
7039 goto out_unlock;
7040 }
7041
7042 cgrp = cgroup_get_from_id(ops->sub_cgroup_id);
7043 if (IS_ERR(cgrp)) {
7044 ret = PTR_ERR(cgrp);
7045 goto out_unlock;
7046 }
7047
7048 raw_spin_lock_irq(&scx_sched_lock);
7049 parent = find_parent_sched(cgrp);
7050 if (IS_ERR(parent)) {
7051 raw_spin_unlock_irq(&scx_sched_lock);
7052 ret = PTR_ERR(parent);
7053 goto out_put_cgrp;
7054 }
7055 kobject_get(&parent->kobj);
7056 raw_spin_unlock_irq(&scx_sched_lock);
7057
7058 /* scx_alloc_and_add_sched() consumes @cgrp whether it succeeds or not */
7059 sch = scx_alloc_and_add_sched(ops, cgrp, parent);
7060 kobject_put(&parent->kobj);
7061 if (IS_ERR(sch)) {
7062 ret = PTR_ERR(sch);
7063 goto out_unlock;
7064 }
7065
7066 ret = scx_link_sched(sch);
7067 if (ret)
7068 goto err_disable;
7069
7070 if (sch->level >= SCX_SUB_MAX_DEPTH) {
7071 scx_error(sch, "max nesting depth %d violated",
7072 SCX_SUB_MAX_DEPTH);
7073 goto err_disable;
7074 }
7075
7076 if (sch->ops.init) {
7077 ret = SCX_CALL_OP_RET(sch, init, NULL);
7078 if (ret) {
7079 ret = ops_sanitize_err(sch, "init", ret);
7080 scx_error(sch, "ops.init() failed (%d)", ret);
7081 goto err_disable;
7082 }
7083 sch->exit_info->flags |= SCX_EFLAG_INITIALIZED;
7084 }
7085
7086 if (validate_ops(sch, ops))
7087 goto err_disable;
7088
7089 struct scx_sub_attach_args sub_attach_args = {
7090 .ops = &sch->ops,
7091 .cgroup_path = sch->cgrp_path,
7092 };
7093
7094 ret = SCX_CALL_OP_RET(parent, sub_attach, NULL,
7095 &sub_attach_args);
7096 if (ret) {
7097 ret = ops_sanitize_err(sch, "sub_attach", ret);
7098 scx_error(sch, "parent rejected (%d)", ret);
7099 goto err_disable;
7100 }
7101 sch->sub_attached = true;
7102
7103 scx_bypass(sch, true);
7104
7105 for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
7106 if (((void (**)(void))ops)[i])
7107 set_bit(i, sch->has_op);
7108
7109 percpu_down_write(&scx_fork_rwsem);
7110 scx_cgroup_lock();
7111
7112 /*
7113 * Set the cgroups' ->scx_sched pointers and check CSS_ONLINE. Either we see
7114 * !CSS_ONLINE or scx_cgroup_lifetime_notify() sees us and shoots us down.
7115 */
7116 set_cgroup_sched(sch_cgroup(sch), sch);
7117 if (!(cgrp->self.flags & CSS_ONLINE)) {
7118 scx_error(sch, "cgroup is not online");
7119 goto err_unlock_and_disable;
7120 }
7121
7122 /*
7123 * Initialize tasks for the new child $sch without exiting them for
7124 * $parent so that the tasks can always be reverted back to $parent
7125 * sched on child init failure.
7126 */
7127 WARN_ON_ONCE(scx_enabling_sub_sched);
7128 scx_enabling_sub_sched = sch;
7129
7130 scx_task_iter_start(&sti, sch->cgrp);
7131 while ((p = scx_task_iter_next_locked(&sti))) {
7132 struct rq *rq;
7133 struct rq_flags rf;
7134
7135 /*
7136 * Task iteration may visit the same task twice when racing
7137 * against exiting. Use %SCX_TASK_SUB_INIT to mark tasks which
7138 * finished __scx_init_task() and skip if set.
7139 *
7140 * A task may exit and get freed between __scx_init_task()
7141 * completion and scx_enable_task(). In such cases,
7142 * scx_disable_and_exit_task() must exit the task for both the
7143 * parent and child scheds.
7144 */
7145 if (p->scx.flags & SCX_TASK_SUB_INIT)
7146 continue;
7147
7148 /* see scx_root_enable() */
7149 if (!tryget_task_struct(p))
7150 continue;
7151
7152 if (!assert_task_ready_or_enabled(p)) {
7153 ret = -EINVAL;
7154 goto abort;
7155 }
7156
7157 scx_task_iter_unlock(&sti);
7158
7159 /*
7160 * As $p is still on $parent, it can't be transitioned to INIT.
7161 * Let's worry about task state later. Use __scx_init_task().
7162 */
7163 ret = __scx_init_task(sch, p, false);
7164 if (ret)
7165 goto abort;
7166
7167 rq = task_rq_lock(p, &rf);
7168 p->scx.flags |= SCX_TASK_SUB_INIT;
7169 task_rq_unlock(rq, p, &rf);
7170
7171 put_task_struct(p);
7172 }
7173 scx_task_iter_stop(&sti);
7174
7175 /*
7176 * All tasks are prepped. Disable/exit tasks for $parent and enable for
7177 * the new @sch.
7178 */
7179 scx_task_iter_start(&sti, sch->cgrp);
7180 while ((p = scx_task_iter_next_locked(&sti))) {
7181 /*
7182 * Use clearing of %SCX_TASK_SUB_INIT to detect and skip
7183 * duplicate iterations.
7184 */
7185 if (!(p->scx.flags & SCX_TASK_SUB_INIT))
7186 continue;
7187
7188 scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_MOVE) {
7189 /*
7190 * $p must be either READY or ENABLED. If ENABLED,
7191 * __scx_disable_and_exit_task() first disables and
7192 * makes it READY. However, after exiting $p, it will
7193 * leave $p as READY.
7194 */
7195 assert_task_ready_or_enabled(p);
7196 __scx_disable_and_exit_task(parent, p);
7197
7198 /*
7199 * $p is now only initialized for @sch and READY, which
7200 * is what we want. Assign it to @sch and enable.
7201 */
7202 rcu_assign_pointer(p->scx.sched, sch);
7203 scx_enable_task(sch, p);
7204
7205 p->scx.flags &= ~SCX_TASK_SUB_INIT;
7206 }
7207 }
7208 scx_task_iter_stop(&sti);
7209
7210 scx_enabling_sub_sched = NULL;
7211
7212 scx_cgroup_unlock();
7213 percpu_up_write(&scx_fork_rwsem);
7214
7215 scx_bypass(sch, false);
7216
7217 pr_info("sched_ext: BPF sub-scheduler \"%s\" enabled\n", sch->ops.name);
7218 kobject_uevent(&sch->kobj, KOBJ_ADD);
7219 ret = 0;
7220 goto out_unlock;
7221
7222 out_put_cgrp:
7223 cgroup_put(cgrp);
7224 out_unlock:
7225 mutex_unlock(&scx_enable_mutex);
7226 cmd->ret = ret;
7227 return;
7228
7229 abort:
7230 put_task_struct(p);
7231 scx_task_iter_stop(&sti);
7232
7233 /*
7234 * Undo __scx_init_task() for tasks we marked. scx_enable_task() never
7235 * ran for @sch on them, so calling scx_disable_task() here would invoke
7236 * ops.disable() without a matching ops.enable(). scx_enabling_sub_sched
7237 * must stay set until SUB_INIT is cleared from every marked task -
7238 * scx_disable_and_exit_task() reads it when a task exits concurrently.
7239 */
7240 scx_task_iter_start(&sti, sch->cgrp);
7241 while ((p = scx_task_iter_next_locked(&sti))) {
7242 if (p->scx.flags & SCX_TASK_SUB_INIT) {
7243 scx_sub_init_cancel_task(sch, p);
7244 p->scx.flags &= ~SCX_TASK_SUB_INIT;
7245 }
7246 }
7247 scx_task_iter_stop(&sti);
7248 scx_enabling_sub_sched = NULL;
7249 err_unlock_and_disable:
7250 /* we'll soon enter disable path, keep bypass on */
7251 scx_cgroup_unlock();
7252 percpu_up_write(&scx_fork_rwsem);
7253 err_disable:
7254 mutex_unlock(&scx_enable_mutex);
7255 scx_flush_disable_work(sch);
7256 cmd->ret = 0;
7257 }
7258
7259 static s32 scx_cgroup_lifetime_notify(struct notifier_block *nb,
7260 unsigned long action, void *data)
7261 {
7262 struct cgroup *cgrp = data;
7263 struct cgroup *parent = cgroup_parent(cgrp);
7264
7265 if (!cgroup_on_dfl(cgrp))
7266 return NOTIFY_OK;
7267
7268 switch (action) {
7269 case CGROUP_LIFETIME_ONLINE:
7270 /* inherit ->scx_sched from $parent */
7271 if (parent)
7272 rcu_assign_pointer(cgrp->scx_sched, parent->scx_sched);
7273 break;
7274 case CGROUP_LIFETIME_OFFLINE:
7275 /* if there is a sched attached, shoot it down */
7276 if (cgrp->scx_sched && cgrp->scx_sched->cgrp == cgrp)
7277 scx_exit(cgrp->scx_sched, SCX_EXIT_UNREG_KERN,
7278 SCX_ECODE_RSN_CGROUP_OFFLINE,
7279 "cgroup %llu going offline", cgroup_id(cgrp));
7280 break;
7281 }
7282
7283 return NOTIFY_OK;
7284 }
7285
7286 static struct notifier_block scx_cgroup_lifetime_nb = {
7287 .notifier_call = scx_cgroup_lifetime_notify,
7288 };
7289
7290 static s32 __init scx_cgroup_lifetime_notifier_init(void)
7291 {
7292 return blocking_notifier_chain_register(&cgroup_lifetime_notifier,
7293 &scx_cgroup_lifetime_nb);
7294 }
7295 core_initcall(scx_cgroup_lifetime_notifier_init);
7296 #endif /* CONFIG_EXT_SUB_SCHED */
7297
7298 static s32 scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
7299 {
7300 static struct kthread_worker *helper;
7301 static DEFINE_MUTEX(helper_mutex);
7302 struct scx_enable_cmd cmd;
7303
7304 if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
7305 cpu_possible_mask)) {
7306 pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
7307 return -EINVAL;
7308 }
7309
7310 if (!READ_ONCE(helper)) {
7311 mutex_lock(&helper_mutex);
7312 if (!helper) {
7313 struct kthread_worker *w =
7314 kthread_run_worker(0, "scx_enable_helper");
7315 if (IS_ERR_OR_NULL(w)) {
7316 mutex_unlock(&helper_mutex);
7317 return -ENOMEM;
7318 }
7319 sched_set_fifo(w->task);
7320 WRITE_ONCE(helper, w);
7321 }
7322 mutex_unlock(&helper_mutex);
7323 }
7324
7325 #ifdef CONFIG_EXT_SUB_SCHED
7326 if (ops->sub_cgroup_id > 1)
7327 kthread_init_work(&cmd.work, scx_sub_enable_workfn);
7328 else
7329 #endif /* CONFIG_EXT_SUB_SCHED */
7330 kthread_init_work(&cmd.work, scx_root_enable_workfn);
7331 cmd.ops = ops;
7332
7333 kthread_queue_work(READ_ONCE(helper), &cmd.work);
7334 kthread_flush_work(&cmd.work);
7335 return cmd.ret;
7336 }
7337
7338
7339 /********************************************************************************
7340 * bpf_struct_ops plumbing.
7341 */
7342 #include <linux/bpf_verifier.h>
7343 #include <linux/bpf.h>
7344 #include <linux/btf.h>
7345
7346 static const struct btf_type *task_struct_type;
7347
7348 static bool bpf_scx_is_valid_access(int off, int size,
7349 enum bpf_access_type type,
7350 const struct bpf_prog *prog,
7351 struct bpf_insn_access_aux *info)
7352 {
7353 if (type != BPF_READ)
7354 return false;
7355 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
7356 return false;
7357 if (off % size != 0)
7358 return false;
7359
7360 return btf_ctx_access(off, size, type, prog, info);
7361 }
7362
7363 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
7364 const struct bpf_reg_state *reg, int off,
7365 int size)
7366 {
7367 const struct btf_type *t;
7368
7369 t = btf_type_by_id(reg->btf, reg->btf_id);
7370 if (t == task_struct_type) {
7371 /*
7372 * COMPAT: Will be removed in v6.23.
7373 */
7374 if ((off >= offsetof(struct task_struct, scx.slice) &&
7375 off + size <= offsetofend(struct task_struct, scx.slice)) ||
7376 (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
7377 off + size <= offsetofend(struct task_struct, scx.dsq_vtime))) {
7378 pr_warn("sched_ext: Writing directly to p->scx.slice/dsq_vtime is deprecated, use scx_bpf_task_set_slice/dsq_vtime()\n");
7379 return SCALAR_VALUE;
7380 }
7381
7382 if (off >= offsetof(struct task_struct, scx.disallow) &&
7383 off + size <= offsetofend(struct task_struct, scx.disallow))
7384 return SCALAR_VALUE;
7385 }
7386
7387 return -EACCES;
7388 }
7389
7390 static const struct bpf_verifier_ops bpf_scx_verifier_ops = {
7391 .get_func_proto = bpf_base_func_proto,
7392 .is_valid_access = bpf_scx_is_valid_access,
7393 .btf_struct_access = bpf_scx_btf_struct_access,
7394 };
7395
7396 static int bpf_scx_init_member(const struct btf_type *t,
7397 const struct btf_member *member,
7398 void *kdata, const void *udata)
7399 {
7400 const struct sched_ext_ops *uops = udata;
7401 struct sched_ext_ops *ops = kdata;
7402 u32 moff = __btf_member_bit_offset(t, member) / 8;
7403 int ret;
7404
7405 switch (moff) {
7406 case offsetof(struct sched_ext_ops, dispatch_max_batch):
7407 if (*(u32 *)(udata + moff) > INT_MAX)
7408 return -E2BIG;
7409 ops->dispatch_max_batch = *(u32 *)(udata + moff);
7410 return 1;
7411 case offsetof(struct sched_ext_ops, flags):
7412 if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS)
7413 return -EINVAL;
7414 ops->flags = *(u64 *)(udata + moff);
7415 return 1;
7416 case offsetof(struct sched_ext_ops, name):
7417 ret = bpf_obj_name_cpy(ops->name, uops->name,
7418 sizeof(ops->name));
7419 if (ret < 0)
7420 return ret;
7421 if (ret == 0)
7422 return -EINVAL;
7423 return 1;
7424 case offsetof(struct sched_ext_ops, timeout_ms):
7425 if (msecs_to_jiffies(*(u32 *)(udata + moff)) >
7426 SCX_WATCHDOG_MAX_TIMEOUT)
7427 return -E2BIG;
7428 ops->timeout_ms = *(u32 *)(udata + moff);
7429 return 1;
7430 case offsetof(struct sched_ext_ops, exit_dump_len):
7431 ops->exit_dump_len =
7432 *(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN;
7433 return 1;
7434 case offsetof(struct sched_ext_ops, hotplug_seq):
7435 ops->hotplug_seq = *(u64 *)(udata + moff);
7436 return 1;
7437 #ifdef CONFIG_EXT_SUB_SCHED
7438 case offsetof(struct sched_ext_ops, sub_cgroup_id):
7439 ops->sub_cgroup_id = *(u64 *)(udata + moff);
7440 return 1;
7441 #endif /* CONFIG_EXT_SUB_SCHED */
7442 }
7443
7444 return 0;
7445 }
7446
7447 #ifdef CONFIG_EXT_SUB_SCHED
7448 static void scx_pstack_recursion_on_dispatch(struct bpf_prog *prog)
7449 {
7450 struct scx_sched *sch;
7451
7452 guard(rcu)();
7453 sch = scx_prog_sched(prog->aux);
7454 if (unlikely(!sch))
7455 return;
7456
7457 scx_error(sch, "dispatch recursion detected");
7458 }
7459 #endif /* CONFIG_EXT_SUB_SCHED */
7460
7461 static int bpf_scx_check_member(const struct btf_type *t,
7462 const struct btf_member *member,
7463 const struct bpf_prog *prog)
7464 {
7465 u32 moff = __btf_member_bit_offset(t, member) / 8;
7466
7467 switch (moff) {
7468 case offsetof(struct sched_ext_ops, init_task):
7469 #ifdef CONFIG_EXT_GROUP_SCHED
7470 case offsetof(struct sched_ext_ops, cgroup_init):
7471 case offsetof(struct sched_ext_ops, cgroup_exit):
7472 case offsetof(struct sched_ext_ops, cgroup_prep_move):
7473 #endif
7474 case offsetof(struct sched_ext_ops, cpu_online):
7475 case offsetof(struct sched_ext_ops, cpu_offline):
7476 case offsetof(struct sched_ext_ops, init):
7477 case offsetof(struct sched_ext_ops, exit):
7478 case offsetof(struct sched_ext_ops, sub_attach):
7479 case offsetof(struct sched_ext_ops, sub_detach):
7480 break;
7481 default:
7482 if (prog->sleepable)
7483 return -EINVAL;
7484 }
7485
7486 #ifdef CONFIG_EXT_SUB_SCHED
7487 /*
7488 * Enable private stack for operations that can nest along the
7489 * hierarchy.
7490 *
7491 * XXX - Ideally, we should only do this for scheds that allow
7492 * sub-scheds and sub-scheds themselves but I don't know how to access
7493 * struct_ops from here.
7494 */
7495 switch (moff) {
7496 case offsetof(struct sched_ext_ops, dispatch):
7497 prog->aux->priv_stack_requested = true;
7498 prog->aux->recursion_detected = scx_pstack_recursion_on_dispatch;
7499 }
7500 #endif /* CONFIG_EXT_SUB_SCHED */
7501
7502 return 0;
7503 }
7504
7505 static int bpf_scx_reg(void *kdata, struct bpf_link *link)
7506 {
7507 return scx_enable(kdata, link);
7508 }
7509
7510 static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
7511 {
7512 struct sched_ext_ops *ops = kdata;
7513 struct scx_sched *sch = rcu_dereference_protected(ops->priv, true);
7514
7515 scx_disable(sch, SCX_EXIT_UNREG);
7516 scx_flush_disable_work(sch);
7517 RCU_INIT_POINTER(ops->priv, NULL);
7518 kobject_put(&sch->kobj);
7519 }
7520
7521 static int bpf_scx_init(struct btf *btf)
7522 {
7523 task_struct_type = btf_type_by_id(btf, btf_tracing_ids[BTF_TRACING_TYPE_TASK]);
7524
7525 return 0;
7526 }
7527
7528 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)
7529 {
7530 /*
7531 * sched_ext does not support updating the actively-loaded BPF
7532 * scheduler, as registering a BPF scheduler can always fail if the
7533 * scheduler returns an error code for e.g. ops.init(), ops.init_task(),
7534 * etc. Similarly, we can always race with unregistration happening
7535 * elsewhere, such as with sysrq.
7536 */
7537 return -EOPNOTSUPP;
7538 }
7539
7540 static int bpf_scx_validate(void *kdata)
7541 {
7542 return 0;
7543 }
7544
7545 static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
7546 static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {}
7547 static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {}
7548 static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *prev__nullable) {}
7549 static void sched_ext_ops__tick(struct task_struct *p) {}
7550 static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {}
7551 static void sched_ext_ops__running(struct task_struct *p) {}
7552 static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {}
7553 static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {}
7554 static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to__nullable) { return false; }
7555 static bool sched_ext_ops__core_sched_before(struct task_struct *a, struct task_struct *b) { return false; }
7556 static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {}
7557 static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {}
7558 static void sched_ext_ops__update_idle(s32 cpu, bool idle) {}
7559 static void sched_ext_ops__cpu_acquire(s32 cpu, struct scx_cpu_acquire_args *args) {}
7560 static void sched_ext_ops__cpu_release(s32 cpu, struct scx_cpu_release_args *args) {}
7561 static s32 sched_ext_ops__init_task(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
7562 static void sched_ext_ops__exit_task(struct task_struct *p, struct scx_exit_task_args *args) {}
7563 static void sched_ext_ops__enable(struct task_struct *p) {}
7564 static void sched_ext_ops__disable(struct task_struct *p) {}
7565 #ifdef CONFIG_EXT_GROUP_SCHED
7566 static s32 sched_ext_ops__cgroup_init(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
7567 static void sched_ext_ops__cgroup_exit(struct cgroup *cgrp) {}
7568 static s32 sched_ext_ops__cgroup_prep_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
7569 static void sched_ext_ops__cgroup_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
7570 static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
7571 static void sched_ext_ops__cgroup_set_weight(struct cgroup *cgrp, u32 weight) {}
7572 static void sched_ext_ops__cgroup_set_bandwidth(struct cgroup *cgrp, u64 period_us, u64 quota_us, u64 burst_us) {}
7573 static void sched_ext_ops__cgroup_set_idle(struct cgroup *cgrp, bool idle) {}
7574 #endif /* CONFIG_EXT_GROUP_SCHED */
7575 static s32 sched_ext_ops__sub_attach(struct scx_sub_attach_args *args) { return -EINVAL; }
7576 static void sched_ext_ops__sub_detach(struct scx_sub_detach_args *args) {}
7577 static void sched_ext_ops__cpu_online(s32 cpu) {}
7578 static void sched_ext_ops__cpu_offline(s32 cpu) {}
7579 static s32 sched_ext_ops__init(void) { return -EINVAL; }
7580 static void sched_ext_ops__exit(struct scx_exit_info *info) {}
7581 static void sched_ext_ops__dump(struct scx_dump_ctx *ctx) {}
7582 static void sched_ext_ops__dump_cpu(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
7583 static void sched_ext_ops__dump_task(struct scx_dump_ctx *ctx, struct task_struct *p) {}
7584
7585 static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
7586 .select_cpu = sched_ext_ops__select_cpu,
7587 .enqueue = sched_ext_ops__enqueue,
7588 .dequeue = sched_ext_ops__dequeue,
7589 .dispatch = sched_ext_ops__dispatch,
7590 .tick = sched_ext_ops__tick,
7591 .runnable = sched_ext_ops__runnable,
7592 .running = sched_ext_ops__running,
7593 .stopping = sched_ext_ops__stopping,
7594 .quiescent = sched_ext_ops__quiescent,
7595 .yield = sched_ext_ops__yield,
7596 .core_sched_before = sched_ext_ops__core_sched_before,
7597 .set_weight = sched_ext_ops__set_weight,
7598 .set_cpumask = sched_ext_ops__set_cpumask,
7599 .update_idle = sched_ext_ops__update_idle,
7600 .cpu_acquire = sched_ext_ops__cpu_acquire,
7601 .cpu_release = sched_ext_ops__cpu_release,
7602 .init_task = sched_ext_ops__init_task,
7603 .exit_task = sched_ext_ops__exit_task,
7604 .enable = sched_ext_ops__enable,
7605 .disable = sched_ext_ops__disable,
7606 #ifdef CONFIG_EXT_GROUP_SCHED
7607 .cgroup_init = sched_ext_ops__cgroup_init,
7608 .cgroup_exit = sched_ext_ops__cgroup_exit,
7609 .cgroup_prep_move = sched_ext_ops__cgroup_prep_move,
7610 .cgroup_move = sched_ext_ops__cgroup_move,
7611 .cgroup_cancel_move = sched_ext_ops__cgroup_cancel_move,
7612 .cgroup_set_weight = sched_ext_ops__cgroup_set_weight,
7613 .cgroup_set_bandwidth = sched_ext_ops__cgroup_set_bandwidth,
7614 .cgroup_set_idle = sched_ext_ops__cgroup_set_idle,
7615 #endif
7616 .sub_attach = sched_ext_ops__sub_attach,
7617 .sub_detach = sched_ext_ops__sub_detach,
7618 .cpu_online = sched_ext_ops__cpu_online,
7619 .cpu_offline = sched_ext_ops__cpu_offline,
7620 .init = sched_ext_ops__init,
7621 .exit = sched_ext_ops__exit,
7622 .dump = sched_ext_ops__dump,
7623 .dump_cpu = sched_ext_ops__dump_cpu,
7624 .dump_task = sched_ext_ops__dump_task,
7625 };
7626
7627 static struct bpf_struct_ops bpf_sched_ext_ops = {
7628 .verifier_ops = &bpf_scx_verifier_ops,
7629 .reg = bpf_scx_reg,
7630 .unreg = bpf_scx_unreg,
7631 .check_member = bpf_scx_check_member,
7632 .init_member = bpf_scx_init_member,
7633 .init = bpf_scx_init,
7634 .update = bpf_scx_update,
7635 .validate = bpf_scx_validate,
7636 .name = "sched_ext_ops",
7637 .owner = THIS_MODULE,
7638 .cfi_stubs = &__bpf_ops_sched_ext_ops
7639 };
7640
7641
7642 /********************************************************************************
7643 * System integration and init.
7644 */
7645
7646 static void sysrq_handle_sched_ext_reset(u8 key)
7647 {
7648 struct scx_sched *sch;
7649
7650 rcu_read_lock();
7651 sch = rcu_dereference(scx_root);
7652 if (likely(sch))
7653 scx_disable(sch, SCX_EXIT_SYSRQ);
7654 else
7655 pr_info("sched_ext: BPF schedulers not loaded\n");
7656 rcu_read_unlock();
7657 }
7658
7659 static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
7660 .handler = sysrq_handle_sched_ext_reset,
7661 .help_msg = "reset-sched-ext(S)",
7662 .action_msg = "Disable sched_ext and revert all tasks to CFS",
7663 .enable_mask = SYSRQ_ENABLE_RTNICE,
7664 };
7665
7666 static void sysrq_handle_sched_ext_dump(u8 key)
7667 {
7668 struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" };
7669 struct scx_sched *sch;
7670
7671 list_for_each_entry_rcu(sch, &scx_sched_all, all)
7672 scx_dump_state(sch, &ei, 0, false);
7673 }
7674
7675 static const struct sysrq_key_op sysrq_sched_ext_dump_op = {
7676 .handler = sysrq_handle_sched_ext_dump,
7677 .help_msg = "dump-sched-ext(D)",
7678 .action_msg = "Trigger sched_ext debug dump",
7679 .enable_mask = SYSRQ_ENABLE_RTNICE,
7680 };
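/*
 * Both handlers can be exercised from user space through the sysrq trigger,
 * e.g. to force-disable the current scheduler or to request a debug dump
 * (subject to the usual sysrq enable mask):
 *
 *	# echo S > /proc/sysrq-trigger
 *	# echo D > /proc/sysrq-trigger
 */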
7681
7682 static bool can_skip_idle_kick(struct rq *rq)
7683 {
7684 lockdep_assert_rq_held(rq);
7685
7686 /*
7687 * We can skip idle kicking if @rq is going to go through at least one
7688 * full SCX scheduling cycle before going idle. Just checking whether
7689 * curr is not idle is insufficient because we could be racing
7690 * balance_one() trying to pull the next task from a remote rq, which
7691 * may fail, and @rq may become idle afterwards.
7692 *
7693 * The race window is small and we don't and can't guarantee that @rq is
7694 * only kicked while idle anyway. Skip only when sure.
7695 */
7696 return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
7697 }
7698
7699 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *ksyncs)
7700 {
7701 struct rq *rq = cpu_rq(cpu);
7702 struct scx_rq *this_scx = &this_rq->scx;
7703 const struct sched_class *cur_class;
7704 bool should_wait = false;
7705 unsigned long flags;
7706
7707 raw_spin_rq_lock_irqsave(rq, flags);
7708 cur_class = rq->curr->sched_class;
7709
7710 /*
7711 * During CPU hotplug, a CPU may depend on kicking itself to make
7712 * forward progress. Allow kicking self regardless of online state. If
7713 * @cpu is running a higher class task, we have no control over @cpu.
7714 * Skip kicking.
7715 */
7716 if ((cpu_online(cpu) || cpu == cpu_of(this_rq)) &&
7717 !sched_class_above(cur_class, &ext_sched_class)) {
7718 if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
7719 if (cur_class == &ext_sched_class)
7720 rq->curr->scx.slice = 0;
7721 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
7722 }
7723
7724 if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
7725 if (cur_class == &ext_sched_class) {
7726 cpumask_set_cpu(cpu, this_scx->cpus_to_sync);
7727 ksyncs[cpu] = rq->scx.kick_sync;
7728 should_wait = true;
7729 }
7730 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
7731 }
7732
7733 resched_curr(rq);
7734 } else {
7735 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
7736 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
7737 }
7738
7739 raw_spin_rq_unlock_irqrestore(rq, flags);
7740
7741 return should_wait;
7742 }
7743
7744 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
7745 {
7746 struct rq *rq = cpu_rq(cpu);
7747 unsigned long flags;
7748
7749 raw_spin_rq_lock_irqsave(rq, flags);
7750
7751 if (!can_skip_idle_kick(rq) &&
7752 (cpu_online(cpu) || cpu == cpu_of(this_rq)))
7753 resched_curr(rq);
7754
7755 raw_spin_rq_unlock_irqrestore(rq, flags);
7756 }
7757
7758 static void kick_cpus_irq_workfn(struct irq_work *irq_work)
7759 {
7760 struct rq *this_rq = this_rq();
7761 struct scx_rq *this_scx = &this_rq->scx;
7762 struct scx_kick_syncs __rcu *ksyncs_pcpu = __this_cpu_read(scx_kick_syncs);
7763 bool should_wait = false;
7764 unsigned long *ksyncs;
7765 s32 cpu;
7766
7767 /* can race with free_kick_syncs() during scheduler disable */
7768 if (unlikely(!ksyncs_pcpu))
7769 return;
7770
7771 ksyncs = rcu_dereference_bh(ksyncs_pcpu)->syncs;
7772
7773 for_each_cpu(cpu, this_scx->cpus_to_kick) {
7774 should_wait |= kick_one_cpu(cpu, this_rq, ksyncs);
7775 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
7776 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
7777 }
7778
7779 for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
7780 kick_one_cpu_if_idle(cpu, this_rq);
7781 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
7782 }
7783
7784 /*
7785 * Can't wait in hardirq — kick_sync can't advance, deadlocking if
7786 * CPUs wait for each other. Defer to kick_sync_wait_bal_cb().
7787 */
7788 if (should_wait) {
7789 raw_spin_rq_lock(this_rq);
7790 this_scx->kick_sync_pending = true;
7791 resched_curr(this_rq);
7792 raw_spin_rq_unlock(this_rq);
7793 }
7794 }
7795
7796 /**
7797 * print_scx_info - print out sched_ext scheduler state
7798 * @log_lvl: the log level to use when printing
7799 * @p: target task
7800 *
7801 * If a sched_ext scheduler is enabled, print the name and state of the
7802 * scheduler. If @p is on sched_ext, print further information about the task.
7803 *
7804 * This function can be safely called on any task as long as the task_struct
7805 * itself is accessible. While safe, this function isn't synchronized and may
7806 * print out mixed-up or garbage values of limited length.
7807 */
7808 void print_scx_info(const char *log_lvl, struct task_struct *p)
7809 {
7810 struct scx_sched *sch;
7811 enum scx_enable_state state = scx_enable_state();
7812 const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
7813 char runnable_at_buf[22] = "?";
7814 struct sched_class *class;
7815 unsigned long runnable_at;
7816
7817 guard(rcu)();
7818
7819 sch = scx_task_sched_rcu(p);
7820
7821 if (!sch)
7822 return;
7823
7824 /*
7825 * Carefully check if the task was running on sched_ext, and then
7826 * carefully copy the time it's been runnable, and its state.
7827 */
7828 if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
7829 class != &ext_sched_class) {
7830 printk("%sSched_ext: %s (%s%s)", log_lvl, sch->ops.name,
7831 scx_enable_state_str[state], all);
7832 return;
7833 }
7834
7835 if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
7836 sizeof(runnable_at)))
7837 scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
7838 jiffies_delta_msecs(runnable_at, jiffies));
7839
7840 /* print everything onto one line to conserve console space */
7841 printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
7842 log_lvl, sch->ops.name, scx_enable_state_str[state], all,
7843 runnable_at_buf);
7844 }
7845
7846 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
7847 {
7848 struct scx_sched *sch;
7849
7850 guard(rcu)();
7851
7852 sch = rcu_dereference(scx_root);
7853 if (!sch)
7854 return NOTIFY_OK;
7855
7856 /*
7857 * SCX schedulers often have userspace components which are sometimes
7858 * involved in critical scheduling paths. PM operations involve freezing
7859 * userspace which can lead to scheduling misbehaviors including stalls.
7860 * Let's bypass while PM operations are in progress.
7861 */
7862 switch (event) {
7863 case PM_HIBERNATION_PREPARE:
7864 case PM_SUSPEND_PREPARE:
7865 case PM_RESTORE_PREPARE:
7866 scx_bypass(sch, true);
7867 break;
7868 case PM_POST_HIBERNATION:
7869 case PM_POST_SUSPEND:
7870 case PM_POST_RESTORE:
7871 scx_bypass(sch, false);
7872 break;
7873 }
7874
7875 return NOTIFY_OK;
7876 }
7877
7878 static struct notifier_block scx_pm_notifier = {
7879 .notifier_call = scx_pm_handler,
7880 };
7881
7882 void __init init_sched_ext_class(void)
7883 {
7884 s32 cpu, v;
7885
7886 /*
7887 * The following is to prevent the compiler from optimizing out the enum
7888 * definitions so that BPF scheduler implementations can use them
7889 * through the generated vmlinux.h.
7890 */
7891 WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
7892 SCX_TG_ONLINE);
7893
7894 scx_idle_init_masks();
7895
7896 for_each_possible_cpu(cpu) {
7897 struct rq *rq = cpu_rq(cpu);
7898 int n = cpu_to_node(cpu);
7899
7900 /* local_dsq's sch will be set during scx_root_enable() */
7901 BUG_ON(init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL, NULL));
7902
7903 INIT_LIST_HEAD(&rq->scx.runnable_list);
7904 INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
7905
7906 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick, GFP_KERNEL, n));
7907 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n));
7908 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n));
7909 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n));
7910 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_sync, GFP_KERNEL, n));
7911 raw_spin_lock_init(&rq->scx.deferred_reenq_lock);
7912 INIT_LIST_HEAD(&rq->scx.deferred_reenq_locals);
7913 INIT_LIST_HEAD(&rq->scx.deferred_reenq_users);
7914 rq->scx.deferred_irq_work = IRQ_WORK_INIT_HARD(deferred_irq_workfn);
7915 rq->scx.kick_cpus_irq_work = IRQ_WORK_INIT_HARD(kick_cpus_irq_workfn);
7916
7917 if (cpu_online(cpu))
7918 cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
7919 }
7920
7921 register_sysrq_key('S', &sysrq_sched_ext_reset_op);
7922 register_sysrq_key('D', &sysrq_sched_ext_dump_op);
7923 INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
7924
7925 #ifdef CONFIG_EXT_SUB_SCHED
7926 BUG_ON(rhashtable_init(&scx_sched_hash, &scx_sched_hash_params));
7927 #endif /* CONFIG_EXT_SUB_SCHED */
7928 }
7929
7930
7931 /********************************************************************************
7932 * Helpers that can be called from the BPF scheduler.
7933 */
7934 static bool scx_vet_enq_flags(struct scx_sched *sch, u64 dsq_id, u64 *enq_flags)
7935 {
7936 bool is_local = dsq_id == SCX_DSQ_LOCAL ||
7937 (dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON;
7938
7939 if (*enq_flags & SCX_ENQ_IMMED) {
7940 if (unlikely(!is_local)) {
7941 scx_error(sch, "SCX_ENQ_IMMED on a non-local DSQ 0x%llx", dsq_id);
7942 return false;
7943 }
7944 } else if ((sch->ops.flags & SCX_OPS_ALWAYS_ENQ_IMMED) && is_local) {
7945 *enq_flags |= SCX_ENQ_IMMED;
7946 }
7947
7948 return true;
7949 }
7950
7951 static bool scx_dsq_insert_preamble(struct scx_sched *sch, struct task_struct *p,
7952 u64 dsq_id, u64 *enq_flags)
7953 {
7954 lockdep_assert_irqs_disabled();
7955
7956 if (unlikely(!p)) {
7957 scx_error(sch, "called with NULL task");
7958 return false;
7959 }
7960
7961 if (unlikely(*enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
7962 scx_error(sch, "invalid enq_flags 0x%llx", *enq_flags);
7963 return false;
7964 }
7965
7966 /* see SCX_EV_INSERT_NOT_OWNED definition */
7967 if (unlikely(!scx_task_on_sched(sch, p))) {
7968 __scx_add_event(sch, SCX_EV_INSERT_NOT_OWNED, 1);
7969 return false;
7970 }
7971
7972 if (!scx_vet_enq_flags(sch, dsq_id, enq_flags))
7973 return false;
7974
7975 return true;
7976 }
7977
7978 static void scx_dsq_insert_commit(struct scx_sched *sch, struct task_struct *p,
7979 u64 dsq_id, u64 enq_flags)
7980 {
7981 struct scx_dsp_ctx *dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
7982 struct task_struct *ddsp_task;
7983
7984 ddsp_task = __this_cpu_read(direct_dispatch_task);
7985 if (ddsp_task) {
7986 mark_direct_dispatch(sch, ddsp_task, p, dsq_id, enq_flags);
7987 return;
7988 }
7989
7990 if (unlikely(dspc->cursor >= sch->dsp_max_batch)) {
7991 scx_error(sch, "dispatch buffer overflow");
7992 return;
7993 }
7994
7995 dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
7996 .task = p,
7997 .qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
7998 .dsq_id = dsq_id,
7999 .enq_flags = enq_flags,
8000 };
8001 }
8002
8003 __bpf_kfunc_start_defs();
8004
8005 /**
8006 * scx_bpf_dsq_insert - Insert a task into the FIFO queue of a DSQ
8007 * @p: task_struct to insert
8008 * @dsq_id: DSQ to insert into
8009 * @slice: duration @p can run for in nsecs, 0 to keep the current value
8010 * @enq_flags: SCX_ENQ_*
8011 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8012 *
8013 * Insert @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe to
8014 * call this function spuriously. Can be called from ops.enqueue(),
8015 * ops.select_cpu(), and ops.dispatch().
8016 *
8017 * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
8018 * and @p must match the task being enqueued.
8019 *
8020 * When called from ops.select_cpu(), @enq_flags and @dsq_id are stored, and @p
8021 * will be directly inserted into the corresponding dispatch queue after
8022 * ops.select_cpu() returns. If @p is inserted into SCX_DSQ_LOCAL, it will be
8023 * inserted into the local DSQ of the CPU returned by ops.select_cpu().
8024 * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
8025 * task is inserted.
8026 *
8027 * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
8028 * and this function can be called up to ops.dispatch_max_batch times to insert
8029 * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of the
8030 * remaining slots. scx_bpf_dsq_move_to_local() flushes the batch and resets the
8031 * counter.
8032 *
8033 * This function doesn't have any locking restrictions and may be called under
8034 * BPF locks (in the future when BPF introduces more flexible locking).
8035 *
8036 * @p is allowed to run for @slice. The scheduling path is triggered on slice
8037 * exhaustion. If zero, the current residual slice is maintained. If
8038 * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
8039 * scx_bpf_kick_cpu() to trigger scheduling.
8040 *
8041 * Returns %true on successful insertion, %false on failure. On the root
8042 * scheduler, %false return triggers scheduler abort and the caller doesn't need
8043 * to check the return value.
8044 */
8045 __bpf_kfunc bool scx_bpf_dsq_insert___v2(struct task_struct *p, u64 dsq_id,
8046 u64 slice, u64 enq_flags,
8047 const struct bpf_prog_aux *aux)
8048 {
8049 struct scx_sched *sch;
8050
8051 guard(rcu)();
8052 sch = scx_prog_sched(aux);
8053 if (unlikely(!sch))
8054 return false;
8055
8056 if (!scx_dsq_insert_preamble(sch, p, dsq_id, &enq_flags))
8057 return false;
8058
8059 if (slice)
8060 p->scx.slice = slice;
8061 else
8062 p->scx.slice = p->scx.slice ?: 1;
8063
8064 scx_dsq_insert_commit(sch, p, dsq_id, enq_flags);
8065
8066 return true;
8067 }
8068
8069 /*
8070 * COMPAT: Will be removed in v6.23 along with the ___v2 suffix.
8071 */
8072 __bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id,
8073 u64 slice, u64 enq_flags,
8074 const struct bpf_prog_aux *aux)
8075 {
8076 scx_bpf_dsq_insert___v2(p, dsq_id, slice, enq_flags, aux);
8077 }
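/*
 * A minimal BPF-side usage sketch (illustrative, not part of this file): a
 * simple ops.enqueue() that direct-dispatches every task to the global DSQ
 * with the default slice. It assumes the usual scx program environment
 * (vmlinux.h plus scx/common.bpf.h, which provides BPF_STRUCT_OPS); the
 * implicit @aux argument is filled in by the verifier via KF_IMPLICIT_ARGS and
 * is never written by the BPF program. On the root scheduler the return value
 * may be ignored since an insertion failure aborts the scheduler anyway.
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 *	}
 */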
8078
8079 static bool scx_dsq_insert_vtime(struct scx_sched *sch, struct task_struct *p,
8080 u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags)
8081 {
8082 if (!scx_dsq_insert_preamble(sch, p, dsq_id, &enq_flags))
8083 return false;
8084
8085 if (slice)
8086 p->scx.slice = slice;
8087 else
8088 p->scx.slice = p->scx.slice ?: 1;
8089
8090 p->scx.dsq_vtime = vtime;
8091
8092 scx_dsq_insert_commit(sch, p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
8093
8094 return true;
8095 }
8096
8097 struct scx_bpf_dsq_insert_vtime_args {
8098 /* @p can't be packed together as KF_RCU is not transitive */
8099 u64 dsq_id;
8100 u64 slice;
8101 u64 vtime;
8102 u64 enq_flags;
8103 };
8104
8105 /**
8106 * __scx_bpf_dsq_insert_vtime - Arg-wrapped vtime DSQ insertion
8107 * @p: task_struct to insert
8108 * @args: struct containing the rest of the arguments
8109 * @args->dsq_id: DSQ to insert into
8110 * @args->slice: duration @p can run for in nsecs, 0 to keep the current value
8111 * @args->vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
8112 * @args->enq_flags: SCX_ENQ_*
8113 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8114 *
8115 * Wrapper kfunc that takes arguments via struct to work around BPF's 5 argument
8116 * limit. BPF programs should use scx_bpf_dsq_insert_vtime() which is provided
8117 * as an inline wrapper in common.bpf.h.
8118 *
8119 * Insert @p into the vtime priority queue of the DSQ identified by
8120 * @args->dsq_id. Tasks queued into the priority queue are ordered by
8121 * @args->vtime. All other aspects are identical to scx_bpf_dsq_insert().
8122 *
8123 * @args->vtime ordering is according to time_before64() which considers
8124 * wrapping. A numerically larger vtime may indicate an earlier position in the
8125 * ordering and vice-versa.
8126 *
8127 * A DSQ can only be used as a FIFO or priority queue at any given time and this
8128 * function must not be called on a DSQ which already has one or more FIFO tasks
8129 * queued and vice-versa. Also, the built-in DSQs (SCX_DSQ_LOCAL and
8130 * SCX_DSQ_GLOBAL) cannot be used as priority queues.
8131 *
8132 * Returns %true on successful insertion, %false on failure. On the root
8133 * scheduler, %false return triggers scheduler abort and the caller doesn't need
8134 * to check the return value.
8135 */
8136 __bpf_kfunc bool
8137 __scx_bpf_dsq_insert_vtime(struct task_struct *p,
8138 struct scx_bpf_dsq_insert_vtime_args *args,
8139 const struct bpf_prog_aux *aux)
8140 {
8141 struct scx_sched *sch;
8142
8143 guard(rcu)();
8144
8145 sch = scx_prog_sched(aux);
8146 if (unlikely(!sch))
8147 return false;
8148
8149 return scx_dsq_insert_vtime(sch, p, args->dsq_id, args->slice,
8150 args->vtime, args->enq_flags);
8151 }
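/*
 * For illustration only, the BPF-side inline wrapper mentioned above could
 * look roughly like the sketch below (the actual definition lives in the scx
 * tooling's common.bpf.h). It packs the scalar arguments into struct
 * scx_bpf_dsq_insert_vtime_args to stay within BPF's five-argument kfunc
 * limit; the implicit @aux argument is supplied by the verifier.
 *
 *	static inline bool
 *	scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice,
 *				 u64 vtime, u64 enq_flags)
 *	{
 *		struct scx_bpf_dsq_insert_vtime_args args = {
 *			.dsq_id		= dsq_id,
 *			.slice		= slice,
 *			.vtime		= vtime,
 *			.enq_flags	= enq_flags,
 *		};
 *
 *		return __scx_bpf_dsq_insert_vtime(p, &args);
 *	}
 */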
8152
8153 /*
8154 * COMPAT: Will be removed in v6.23.
8155 */
8156 __bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id,
8157 u64 slice, u64 vtime, u64 enq_flags)
8158 {
8159 struct scx_sched *sch;
8160
8161 guard(rcu)();
8162
8163 sch = rcu_dereference(scx_root);
8164 if (unlikely(!sch))
8165 return;
8166
8167 #ifdef CONFIG_EXT_SUB_SCHED
8168 /*
8169 * Disallow if any sub-scheds are attached. There is no way to tell
8170 * which scheduler called us, just error out @p's scheduler.
8171 */
8172 if (unlikely(!list_empty(&sch->children))) {
8173 scx_error(scx_task_sched(p), "__scx_bpf_dsq_insert_vtime() must be used");
8174 return;
8175 }
8176 #endif
8177
8178 scx_dsq_insert_vtime(sch, p, dsq_id, slice, vtime, enq_flags);
8179 }
8180
8181 __bpf_kfunc_end_defs();
8182
8183 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
8184 BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_IMPLICIT_ARGS | KF_RCU)
8185 BTF_ID_FLAGS(func, scx_bpf_dsq_insert___v2, KF_IMPLICIT_ARGS | KF_RCU)
8186 BTF_ID_FLAGS(func, __scx_bpf_dsq_insert_vtime, KF_IMPLICIT_ARGS | KF_RCU)
8187 BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU)
8188 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
8189
8190 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
8191 .owner = THIS_MODULE,
8192 .set = &scx_kfunc_ids_enqueue_dispatch,
8193 .filter = scx_kfunc_context_filter,
8194 };
8195
8196 static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit,
8197 struct task_struct *p, u64 dsq_id, u64 enq_flags)
8198 {
8199 struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
8200 struct scx_sched *sch;
8201 struct rq *this_rq, *src_rq, *locked_rq;
8202 bool dispatched = false;
8203 bool in_balance;
8204 unsigned long flags;
8205
8206 /*
8207 * The verifier considers an iterator slot initialized on any
8208 * KF_ITER_NEW return, so a BPF program may legally reach here after
8209 * bpf_iter_scx_dsq_new() failed and left @kit->dsq NULL.
8210 */
8211 if (unlikely(!src_dsq))
8212 return false;
8213
8214 sch = src_dsq->sched;
8215
8216 if (!scx_vet_enq_flags(sch, dsq_id, &enq_flags))
8217 return false;
8218
8219 /*
8220 * If the BPF scheduler keeps calling this function repeatedly, it can
8221 * cause similar live-lock conditions as consume_dispatch_q().
8222 */
8223 if (unlikely(READ_ONCE(sch->aborting)))
8224 return false;
8225
8226 if (unlikely(!scx_task_on_sched(sch, p))) {
8227 scx_error(sch, "scx_bpf_dsq_move[_vtime]() on %s[%d] but the task belongs to a different scheduler",
8228 p->comm, p->pid);
8229 return false;
8230 }
8231
8232 /*
8233 * Can be called from either ops.dispatch() locking this_rq() or any
8234 * context where no rq lock is held. If latter, lock @p's task_rq which
8235 * we'll likely need anyway.
8236 */
8237 src_rq = task_rq(p);
8238
8239 local_irq_save(flags);
8240 this_rq = this_rq();
8241 in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
8242
8243 if (in_balance) {
8244 if (this_rq != src_rq) {
8245 raw_spin_rq_unlock(this_rq);
8246 raw_spin_rq_lock(src_rq);
8247 }
8248 } else {
8249 raw_spin_rq_lock(src_rq);
8250 }
8251
8252 locked_rq = src_rq;
8253 raw_spin_lock(&src_dsq->lock);
8254
8255 /* did someone else get to it while we dropped the locks? */
8256 if (nldsq_cursor_lost_task(&kit->cursor, src_rq, src_dsq, p)) {
8257 raw_spin_unlock(&src_dsq->lock);
8258 goto out;
8259 }
8260
8261 /* @p is still on $src_dsq and stable, determine the destination */
8262 dst_dsq = find_dsq_for_dispatch(sch, this_rq, dsq_id, task_cpu(p));
8263
8264 /*
8265 * Apply vtime and slice updates before moving so that the new time is
8266 * visible before inserting into $dst_dsq. @p is still on $src_dsq but
8267 * this is safe as we're locking it.
8268 */
8269 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
8270 p->scx.dsq_vtime = kit->vtime;
8271 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
8272 p->scx.slice = kit->slice;
8273
8274 /* execute move */
8275 locked_rq = move_task_between_dsqs(sch, p, enq_flags, src_dsq, dst_dsq);
8276 dispatched = true;
8277 out:
8278 if (in_balance) {
8279 if (this_rq != locked_rq) {
8280 raw_spin_rq_unlock(locked_rq);
8281 raw_spin_rq_lock(this_rq);
8282 }
8283 } else {
8284 raw_spin_rq_unlock_irqrestore(locked_rq, flags);
8285 }
8286
8287 kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE |
8288 __SCX_DSQ_ITER_HAS_VTIME);
8289 return dispatched;
8290 }
8291
8292 __bpf_kfunc_start_defs();
8293
8294 /**
8295 * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
8296 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8297 *
8298 * Can only be called from ops.dispatch().
8299 */
8300 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(const struct bpf_prog_aux *aux)
8301 {
8302 struct scx_sched *sch;
8303
8304 guard(rcu)();
8305
8306 sch = scx_prog_sched(aux);
8307 if (unlikely(!sch))
8308 return 0;
8309
8310 return sch->dsp_max_batch - __this_cpu_read(sch->pcpu->dsp_ctx.cursor);
8311 }
8312
8313 /**
8314 * scx_bpf_dispatch_cancel - Cancel the latest dispatch
8315 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8316 *
8317 * Cancel the latest dispatch. Can be called multiple times to cancel further
8318 * dispatches. Can only be called from ops.dispatch().
8319 */
8320 __bpf_kfunc void scx_bpf_dispatch_cancel(const struct bpf_prog_aux *aux)
8321 {
8322 struct scx_sched *sch;
8323 struct scx_dsp_ctx *dspc;
8324
8325 guard(rcu)();
8326
8327 sch = scx_prog_sched(aux);
8328 if (unlikely(!sch))
8329 return;
8330
8331 dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
8332
8333 if (dspc->cursor > 0)
8334 dspc->cursor--;
8335 else
8336 scx_error(sch, "dispatch buffer underflow");
8337 }
8338
8339 /**
8340 * scx_bpf_dsq_move_to_local - move a task from a DSQ to the current CPU's local DSQ
8341 * @dsq_id: DSQ to move task from. Must be a user-created DSQ
8342 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8343 * @enq_flags: %SCX_ENQ_*
8344 *
8345 * Move a task from the non-local DSQ identified by @dsq_id to the current CPU's
8346 * local DSQ for execution with @enq_flags applied. Can only be called from
8347 * ops.dispatch().
8348 *
8349 * Built-in DSQs (%SCX_DSQ_GLOBAL and %SCX_DSQ_LOCAL*) are not supported as
8350 * sources. Local DSQs support reenqueueing (a task can be picked up for
8351 * execution, dequeued for property changes, or reenqueued), but the BPF
8352 * scheduler cannot directly iterate or move tasks from them. %SCX_DSQ_GLOBAL
8353 * is similar but also doesn't support reenqueueing, as it maps to multiple
8354 * per-node DSQs making the scope difficult to define; this may change in the
8355 * future.
8356 *
8357 * This function flushes the in-flight dispatches from scx_bpf_dsq_insert()
8358 * before trying to move from the specified DSQ. It may also grab rq locks and
8359 * thus can't be called under any BPF locks.
8360 *
8361 * Returns %true if a task has been moved, %false if there isn't any task to
8362 * move.
8363 */
8364 __bpf_kfunc bool scx_bpf_dsq_move_to_local___v2(u64 dsq_id, u64 enq_flags,
8365 const struct bpf_prog_aux *aux)
8366 {
8367 struct scx_dispatch_q *dsq;
8368 struct scx_sched *sch;
8369 struct scx_dsp_ctx *dspc;
8370
8371 guard(rcu)();
8372
8373 sch = scx_prog_sched(aux);
8374 if (unlikely(!sch))
8375 return false;
8376
8377 if (!scx_vet_enq_flags(sch, SCX_DSQ_LOCAL, &enq_flags))
8378 return false;
8379
8380 dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
8381
8382 flush_dispatch_buf(sch, dspc->rq);
8383
8384 dsq = find_user_dsq(sch, dsq_id);
8385 if (unlikely(!dsq)) {
8386 scx_error(sch, "invalid DSQ ID 0x%016llx", dsq_id);
8387 return false;
8388 }
8389
8390 if (consume_dispatch_q(sch, dspc->rq, dsq, enq_flags)) {
8391 /*
8392 * A successfully consumed task can be dequeued before it starts
8393 * running while the CPU is trying to migrate other dispatched
8394 * tasks. Bump nr_tasks to tell balance_one() to retry on empty
8395 * local DSQ.
8396 */
8397 dspc->nr_tasks++;
8398 return true;
8399 } else {
8400 return false;
8401 }
8402 }
8403
8404 /*
8405  * COMPAT: ___v2 was introduced in v7.1. Remove this and the ___v2 tag in the future.
8406 */
8407 __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id, const struct bpf_prog_aux *aux)
8408 {
8409 return scx_bpf_dsq_move_to_local___v2(dsq_id, 0, aux);
8410 }
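
/*
 * Example (illustrative only): a minimal ops.dispatch() which pulls the next
 * task from a user DSQ into the local DSQ of the dispatching CPU. MY_DSQ_ID
 * is a placeholder for a DSQ created with scx_bpf_create_dsq(); the implicit
 * @aux argument is never written by the BPF program.
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		scx_bpf_dsq_move_to_local(MY_DSQ_ID);
 *	}
 */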
8411
8412 /**
8413 * scx_bpf_dsq_move_set_slice - Override slice when moving between DSQs
8414 * @it__iter: DSQ iterator in progress
8415 * @slice: duration the moved task can run for in nsecs
8416 *
8417 * Override the slice of the next task that will be moved from @it__iter using
8418 * scx_bpf_dsq_move[_vtime](). If this function is not called, the previous
8419 * slice duration is kept.
8420 */
8421 __bpf_kfunc void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter,
8422 u64 slice)
8423 {
8424 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
8425
8426 kit->slice = slice;
8427 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
8428 }
8429
8430 /**
8431 * scx_bpf_dsq_move_set_vtime - Override vtime when moving between DSQs
8432 * @it__iter: DSQ iterator in progress
8433 * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
8434 *
8435 * Override the vtime of the next task that will be moved from @it__iter using
8436  * scx_bpf_dsq_move_vtime(). If this function is not called, the previous
8437  * vtime is kept. If scx_bpf_dsq_move() is used to dispatch the next task, the
8438 * override is ignored and cleared.
8439 */
8440 __bpf_kfunc void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter,
8441 u64 vtime)
8442 {
8443 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
8444
8445 kit->vtime = vtime;
8446 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
8447 }
8448
8449 /**
8450 * scx_bpf_dsq_move - Move a task from DSQ iteration to a DSQ
8451 * @it__iter: DSQ iterator in progress
8452 * @p: task to transfer
8453 * @dsq_id: DSQ to move @p to
8454 * @enq_flags: SCX_ENQ_*
8455 *
8456 * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ
8457 * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can
8458 * be the destination.
8459 *
8460 * For the transfer to be successful, @p must still be on the DSQ and have been
8461 * queued before the DSQ iteration started. This function doesn't care whether
8462 * @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have
8463 * been queued before the iteration started.
8464 *
8465 * @p's slice is kept by default. Use scx_bpf_dsq_move_set_slice() to update.
8466 *
8467 * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
8468 * lock (e.g. BPF timers or SYSCALL programs).
8469 *
8470 * Returns %true if @p has been consumed, %false if @p had already been
8471 * consumed, dequeued, or, for sub-scheds, @dsq_id points to a disallowed local
8472 * DSQ.
8473 */
8474 __bpf_kfunc bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter,
8475 struct task_struct *p, u64 dsq_id,
8476 u64 enq_flags)
8477 {
8478 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
8479 p, dsq_id, enq_flags);
8480 }
8481
8482 /**
8483 * scx_bpf_dsq_move_vtime - Move a task from DSQ iteration to a PRIQ DSQ
8484 * @it__iter: DSQ iterator in progress
8485 * @p: task to transfer
8486 * @dsq_id: DSQ to move @p to
8487 * @enq_flags: SCX_ENQ_*
8488 *
8489 * Transfer @p which is on the DSQ currently iterated by @it__iter to the
8490 * priority queue of the DSQ specified by @dsq_id. The destination must be a
8491 * user DSQ as only user DSQs support priority queue.
8492 *
8493 * @p's slice and vtime are kept by default. Use scx_bpf_dsq_move_set_slice()
8494 * and scx_bpf_dsq_move_set_vtime() to update.
8495 *
8496 * All other aspects are identical to scx_bpf_dsq_move(). See
8497 * scx_bpf_dsq_insert_vtime() for more information on @vtime.
8498 */
8499 __bpf_kfunc bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter,
8500 struct task_struct *p, u64 dsq_id,
8501 u64 enq_flags)
8502 {
8503 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
8504 p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
8505 }
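
/*
 * Example (illustrative only): walk a user DSQ with the DSQ iterator and move
 * the first task which can run on @cpu to that CPU's local DSQ, refreshing its
 * slice on the way. MY_DSQ_ID is a placeholder and BPF_FOR_EACH_ITER refers to
 * the in-progress iterator as provided by the BPF-side helper headers.
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, MY_DSQ_ID, 0) {
 *		if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr))
 *			continue;
 *		scx_bpf_dsq_move_set_slice(BPF_FOR_EACH_ITER, SCX_SLICE_DFL);
 *		if (scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p,
 *				     SCX_DSQ_LOCAL_ON | cpu, 0))
 *			break;
 *	}
 */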
8506
8507 #ifdef CONFIG_EXT_SUB_SCHED
8508 /**
8509 * scx_bpf_sub_dispatch - Trigger dispatching on a child scheduler
8510 * @cgroup_id: cgroup ID of the child scheduler to dispatch
8511 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8512 *
8513 * Allows a parent scheduler to trigger dispatching on one of its direct
8514 * child schedulers. The child scheduler runs its dispatch operation to
8515 * move tasks from dispatch queues to the local runqueue.
8516 *
8517 * Returns: true on success, false if cgroup_id is invalid, not a direct
8518 * child, or caller lacks dispatch permission.
8519 */
8520 __bpf_kfunc bool scx_bpf_sub_dispatch(u64 cgroup_id, const struct bpf_prog_aux *aux)
8521 {
8522 struct rq *this_rq = this_rq();
8523 struct scx_sched *parent, *child;
8524
8525 guard(rcu)();
8526 parent = scx_prog_sched(aux);
8527 if (unlikely(!parent))
8528 return false;
8529
8530 child = scx_find_sub_sched(cgroup_id);
8531
8532 if (unlikely(!child))
8533 return false;
8534
8535 if (unlikely(scx_parent(child) != parent)) {
8536 scx_error(parent, "trying to dispatch a distant sub-sched on cgroup %llu",
8537 cgroup_id);
8538 return false;
8539 }
8540
8541 return scx_dispatch_sched(child, this_rq, this_rq->scx.sub_dispatch_prev,
8542 true);
8543 }
8544 #endif /* CONFIG_EXT_SUB_SCHED */
8545
8546 __bpf_kfunc_end_defs();
8547
8548 BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
8549 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots, KF_IMPLICIT_ARGS)
8550 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel, KF_IMPLICIT_ARGS)
8551 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local, KF_IMPLICIT_ARGS)
8552 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local___v2, KF_IMPLICIT_ARGS)
8553 /* scx_bpf_dsq_move*() also in scx_kfunc_ids_unlocked: callable from unlocked contexts */
8554 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU)
8555 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU)
8556 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
8557 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
8558 #ifdef CONFIG_EXT_SUB_SCHED
8559 BTF_ID_FLAGS(func, scx_bpf_sub_dispatch, KF_IMPLICIT_ARGS)
8560 #endif
8561 BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
8562
8563 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
8564 .owner = THIS_MODULE,
8565 .set = &scx_kfunc_ids_dispatch,
8566 .filter = scx_kfunc_context_filter,
8567 };
8568
8569 __bpf_kfunc_start_defs();
8570
8571 /**
8572 * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
8573 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8574 *
8575 * Iterate over all of the tasks currently enqueued on the local DSQ of the
8576 * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
8577 * processed tasks. Can only be called from ops.cpu_release().
8578 */
8579 __bpf_kfunc u32 scx_bpf_reenqueue_local(const struct bpf_prog_aux *aux)
8580 {
8581 struct scx_sched *sch;
8582 struct rq *rq;
8583
8584 guard(rcu)();
8585 sch = scx_prog_sched(aux);
8586 if (unlikely(!sch))
8587 return 0;
8588
8589 rq = cpu_rq(smp_processor_id());
8590 lockdep_assert_rq_held(rq);
8591
8592 return reenq_local(sch, rq, SCX_REENQ_ANY);
8593 }
8594
8595 __bpf_kfunc_end_defs();
8596
8597 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
8598 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local, KF_IMPLICIT_ARGS)
8599 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
8600
8601 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
8602 .owner = THIS_MODULE,
8603 .set = &scx_kfunc_ids_cpu_release,
8604 .filter = scx_kfunc_context_filter,
8605 };
8606
8607 __bpf_kfunc_start_defs();
8608
8609 /**
8610 * scx_bpf_create_dsq - Create a custom DSQ
8611 * @dsq_id: DSQ to create
8612 * @node: NUMA node to allocate from
8613 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8614 *
8615 * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable
8616 * scx callback, and any BPF_PROG_TYPE_SYSCALL prog.
8617 */
8618 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node, const struct bpf_prog_aux *aux)
8619 {
8620 struct scx_dispatch_q *dsq;
8621 struct scx_sched *sch;
8622 s32 ret;
8623
8624 if (unlikely(node >= (int)nr_node_ids ||
8625 (node < 0 && node != NUMA_NO_NODE)))
8626 return -EINVAL;
8627
8628 if (unlikely(dsq_id & SCX_DSQ_FLAG_BUILTIN))
8629 return -EINVAL;
8630
8631 dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
8632 if (!dsq)
8633 return -ENOMEM;
8634
8635 /*
8636 * init_dsq() must be called in GFP_KERNEL context. Init it with NULL
8637 * @sch and update afterwards.
8638 */
8639 ret = init_dsq(dsq, dsq_id, NULL);
8640 if (ret) {
8641 kfree(dsq);
8642 return ret;
8643 }
8644
8645 rcu_read_lock();
8646
8647 sch = scx_prog_sched(aux);
8648 if (sch) {
8649 dsq->sched = sch;
8650 ret = rhashtable_lookup_insert_fast(&sch->dsq_hash, &dsq->hash_node,
8651 dsq_hash_params);
8652 } else {
8653 ret = -ENODEV;
8654 }
8655
8656 rcu_read_unlock();
8657 if (ret) {
8658 exit_dsq(dsq);
8659 kfree(dsq);
8660 }
8661 return ret;
8662 }
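
/*
 * Example (illustrative only): user DSQs are typically created from the
 * sleepable ops.init() callback before any task is enqueued. MY_DSQ_ID is a
 * placeholder; -1 stands for NUMA_NO_NODE.
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
 *	{
 *		return scx_bpf_create_dsq(MY_DSQ_ID, -1);
 *	}
 */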
8663
8664 __bpf_kfunc_end_defs();
8665
8666 BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
8667 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_IMPLICIT_ARGS | KF_SLEEPABLE)
8668 /* also in scx_kfunc_ids_dispatch: also callable from ops.dispatch() */
8669 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU)
8670 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU)
8671 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
8672 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
8673 /* also in scx_kfunc_ids_select_cpu: also callable from ops.select_cpu()/ops.enqueue() */
8674 BTF_ID_FLAGS(func, __scx_bpf_select_cpu_and, KF_IMPLICIT_ARGS | KF_RCU)
8675 BTF_ID_FLAGS(func, scx_bpf_select_cpu_and, KF_RCU)
8676 BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_IMPLICIT_ARGS | KF_RCU)
8677 BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
8678
8679 static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = {
8680 .owner = THIS_MODULE,
8681 .set = &scx_kfunc_ids_unlocked,
8682 .filter = scx_kfunc_context_filter,
8683 };
8684
8685 __bpf_kfunc_start_defs();
8686
8687 /**
8688 * scx_bpf_task_set_slice - Set task's time slice
8689 * @p: task of interest
8690 * @slice: time slice to set in nsecs
8691 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8692 *
8693 * Set @p's time slice to @slice. Returns %true on success, %false if the
8694 * calling scheduler doesn't have authority over @p.
8695 */
8696 __bpf_kfunc bool scx_bpf_task_set_slice(struct task_struct *p, u64 slice,
8697 const struct bpf_prog_aux *aux)
8698 {
8699 struct scx_sched *sch;
8700
8701 guard(rcu)();
8702 sch = scx_prog_sched(aux);
8703 if (unlikely(!sch || !scx_task_on_sched(sch, p)))
8704 return false;
8705
8706 p->scx.slice = slice;
8707 return true;
8708 }
8709
8710 /**
8711 * scx_bpf_task_set_dsq_vtime - Set task's virtual time for DSQ ordering
8712 * @p: task of interest
8713 * @vtime: virtual time to set
8714 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8715 *
8716 * Set @p's virtual time to @vtime. Returns %true on success, %false if the
8717 * calling scheduler doesn't have authority over @p.
8718 */
8719 __bpf_kfunc bool scx_bpf_task_set_dsq_vtime(struct task_struct *p, u64 vtime,
8720 const struct bpf_prog_aux *aux)
8721 {
8722 struct scx_sched *sch;
8723
8724 guard(rcu)();
8725 sch = scx_prog_sched(aux);
8726 if (unlikely(!sch || !scx_task_on_sched(sch, p)))
8727 return false;
8728
8729 p->scx.dsq_vtime = vtime;
8730 return true;
8731 }
8732
8733 static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags)
8734 {
8735 struct rq *this_rq;
8736 unsigned long irq_flags;
8737
8738 if (!ops_cpu_valid(sch, cpu, NULL))
8739 return;
8740
8741 local_irq_save(irq_flags);
8742
8743 this_rq = this_rq();
8744
8745 /*
8746 * While bypassing for PM ops, IRQ handling may not be online which can
8747 * lead to irq_work_queue() malfunction such as infinite busy wait for
8748 * IRQ status update. Suppress kicking.
8749 */
8750 if (scx_bypassing(sch, cpu_of(this_rq)))
8751 goto out;
8752
8753 /*
8754 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
8755 * rq locks. We can probably be smarter and avoid bouncing if called
8756 * from ops which don't hold a rq lock.
8757 */
8758 if (flags & SCX_KICK_IDLE) {
8759 struct rq *target_rq = cpu_rq(cpu);
8760
8761 if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT)))
8762 scx_error(sch, "PREEMPT/WAIT cannot be used with SCX_KICK_IDLE");
8763
8764 if (raw_spin_rq_trylock(target_rq)) {
8765 if (can_skip_idle_kick(target_rq)) {
8766 raw_spin_rq_unlock(target_rq);
8767 goto out;
8768 }
8769 raw_spin_rq_unlock(target_rq);
8770 }
8771 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
8772 } else {
8773 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
8774
8775 if (flags & SCX_KICK_PREEMPT)
8776 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
8777 if (flags & SCX_KICK_WAIT)
8778 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
8779 }
8780
8781 irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
8782 out:
8783 local_irq_restore(irq_flags);
8784 }
8785
8786 /**
8787 * scx_bpf_kick_cpu - Trigger reschedule on a CPU
8788 * @cpu: cpu to kick
8789 * @flags: %SCX_KICK_* flags
8790 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8791 *
8792 * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
8793 * trigger rescheduling on a busy CPU. This can be called from any online
8794 * scx_ops operation and the actual kicking is performed asynchronously through
8795 * an irq work.
8796 */
8797 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags, const struct bpf_prog_aux *aux)
8798 {
8799 struct scx_sched *sch;
8800
8801 guard(rcu)();
8802 sch = scx_prog_sched(aux);
8803 if (likely(sch))
8804 scx_kick_cpu(sch, cpu, flags);
8805 }
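
/*
 * Example (illustrative only): after inserting a task directly into a remote
 * CPU's local DSQ, that CPU can be kicked so it notices the new task without
 * waiting for its next scheduling event. @cpu is whatever CPU the scheduler
 * picked, e.g. in ops.select_cpu() or ops.enqueue().
 *
 *	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL, 0);
 *	scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
 */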
8806
8807 /**
8808 * scx_bpf_dsq_nr_queued - Return the number of queued tasks
8809 * @dsq_id: id of the DSQ
8810 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8811 *
8812 * Return the number of tasks in the DSQ matching @dsq_id. If not found,
8813 * -%ENOENT is returned.
8814 */
8815 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id, const struct bpf_prog_aux *aux)
8816 {
8817 struct scx_sched *sch;
8818 struct scx_dispatch_q *dsq;
8819 s32 ret;
8820
8821 preempt_disable();
8822
8823 sch = scx_prog_sched(aux);
8824 if (unlikely(!sch)) {
8825 ret = -ENODEV;
8826 goto out;
8827 }
8828
8829 if (dsq_id == SCX_DSQ_LOCAL) {
8830 ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
8831 goto out;
8832 } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
8833 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
8834
8835 if (ops_cpu_valid(sch, cpu, NULL)) {
8836 ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
8837 goto out;
8838 }
8839 } else {
8840 dsq = find_user_dsq(sch, dsq_id);
8841 if (dsq) {
8842 ret = READ_ONCE(dsq->nr);
8843 goto out;
8844 }
8845 }
8846 ret = -ENOENT;
8847 out:
8848 preempt_enable();
8849 return ret;
8850 }
8851
8852 /**
8853 * scx_bpf_destroy_dsq - Destroy a custom DSQ
8854 * @dsq_id: DSQ to destroy
8855 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8856 *
8857 * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
8858 * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
8859 * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
8860 * which doesn't exist. Can be called from any online scx_ops operations.
8861 */
8862 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id, const struct bpf_prog_aux *aux)
8863 {
8864 struct scx_sched *sch;
8865
8866 guard(rcu)();
8867 sch = scx_prog_sched(aux);
8868 if (sch)
8869 destroy_dsq(sch, dsq_id);
8870 }
8871
8872 /**
8873 * bpf_iter_scx_dsq_new - Create a DSQ iterator
8874 * @it: iterator to initialize
8875 * @dsq_id: DSQ to iterate
8876 * @flags: %SCX_DSQ_ITER_*
8877 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8878 *
8879 * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
8880 * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
8881 * tasks which are already queued when this function is invoked.
8882 */
8883 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
8884 u64 flags, const struct bpf_prog_aux *aux)
8885 {
8886 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
8887 struct scx_sched *sch;
8888
8889 BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
8890 sizeof(struct bpf_iter_scx_dsq));
8891 BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
8892 __alignof__(struct bpf_iter_scx_dsq));
8893 BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
8894 ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
8895
8896 /*
8897 * next() and destroy() will be called regardless of the return value.
8898 * Always clear $kit->dsq.
8899 */
8900 kit->dsq = NULL;
8901
8902 sch = scx_prog_sched(aux);
8903 if (unlikely(!sch))
8904 return -ENODEV;
8905
8906 if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
8907 return -EINVAL;
8908
8909 kit->dsq = find_user_dsq(sch, dsq_id);
8910 if (!kit->dsq)
8911 return -ENOENT;
8912
8913 kit->cursor = INIT_DSQ_LIST_CURSOR(kit->cursor, kit->dsq, flags);
8914
8915 return 0;
8916 }
8917
8918 /**
8919 * bpf_iter_scx_dsq_next - Progress a DSQ iterator
8920 * @it: iterator to progress
8921 *
8922 * Return the next task. See bpf_iter_scx_dsq_new().
8923 */
8924 __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
8925 {
8926 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
8927
8928 if (!kit->dsq)
8929 return NULL;
8930
8931 guard(raw_spinlock_irqsave)(&kit->dsq->lock);
8932
8933 return nldsq_cursor_next_task(&kit->cursor, kit->dsq);
8934 }
8935
8936 /**
8937 * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
8938 * @it: iterator to destroy
8939 *
8940 * Undo scx_iter_scx_dsq_new().
8941 */
8942 __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
8943 {
8944 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
8945
8946 if (!kit->dsq)
8947 return;
8948
8949 if (!list_empty(&kit->cursor.node)) {
8950 unsigned long flags;
8951
8952 raw_spin_lock_irqsave(&kit->dsq->lock, flags);
8953 list_del_init(&kit->cursor.node);
8954 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
8955 }
8956 kit->dsq = NULL;
8957 }
8958
8959 /**
8960 * scx_bpf_dsq_peek - Lockless peek at the first element.
8961 * @dsq_id: DSQ to examine.
8962 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
8963 *
8964 * Read the first element in the DSQ. This is semantically equivalent to using
8965 * the DSQ iterator, but is lockfree. Of course, like any lockless operation,
8966 * this provides only a point-in-time snapshot, and the contents may change
8967 * by the time any subsequent locking operation reads the queue.
8968 *
8969  * Returns the first task, or NULL on an empty queue or internal error.
8970 */
8971 __bpf_kfunc struct task_struct *scx_bpf_dsq_peek(u64 dsq_id,
8972 const struct bpf_prog_aux *aux)
8973 {
8974 struct scx_sched *sch;
8975 struct scx_dispatch_q *dsq;
8976
8977 sch = scx_prog_sched(aux);
8978 if (unlikely(!sch))
8979 return NULL;
8980
8981 if (unlikely(dsq_id & SCX_DSQ_FLAG_BUILTIN)) {
8982 scx_error(sch, "peek disallowed on builtin DSQ 0x%llx", dsq_id);
8983 return NULL;
8984 }
8985
8986 dsq = find_user_dsq(sch, dsq_id);
8987 if (unlikely(!dsq)) {
8988 scx_error(sch, "peek on non-existent DSQ 0x%llx", dsq_id);
8989 return NULL;
8990 }
8991
8992 return rcu_dereference(dsq->first_task);
8993 }
8994
8995 /**
8996 * scx_bpf_dsq_reenq - Re-enqueue tasks on a DSQ
8997 * @dsq_id: DSQ to re-enqueue
8998 * @reenq_flags: %SCX_RENQ_*
8999 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9000 *
9001 * Iterate over all of the tasks currently enqueued on the DSQ identified by
9002 * @dsq_id, and re-enqueue them in the BPF scheduler. The following DSQs are
9003 * supported:
9004 *
9005 * - Local DSQs (%SCX_DSQ_LOCAL or %SCX_DSQ_LOCAL_ON | $cpu)
9006 * - User DSQs
9007 *
9008 * Re-enqueues are performed asynchronously. Can be called from anywhere.
9009 */
9010 __bpf_kfunc void scx_bpf_dsq_reenq(u64 dsq_id, u64 reenq_flags,
9011 const struct bpf_prog_aux *aux)
9012 {
9013 struct scx_sched *sch;
9014 struct scx_dispatch_q *dsq;
9015
9016 guard(preempt)();
9017
9018 sch = scx_prog_sched(aux);
9019 if (unlikely(!sch))
9020 return;
9021
9022 if (unlikely(reenq_flags & ~__SCX_REENQ_USER_MASK)) {
9023 scx_error(sch, "invalid SCX_REENQ flags 0x%llx", reenq_flags);
9024 return;
9025 }
9026
9027 /* not specifying any filter bits is the same as %SCX_REENQ_ANY */
9028 if (!(reenq_flags & __SCX_REENQ_FILTER_MASK))
9029 reenq_flags |= SCX_REENQ_ANY;
9030
9031 dsq = find_dsq_for_dispatch(sch, this_rq(), dsq_id, smp_processor_id());
9032 schedule_dsq_reenq(sch, dsq, reenq_flags, scx_locked_rq());
9033 }
9034
9035 /**
9036 * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
9037 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9038 *
9039 * Iterate over all of the tasks currently enqueued on the local DSQ of the
9040 * caller's CPU, and re-enqueue them in the BPF scheduler. Can be called from
9041 * anywhere.
9042 *
9043 * This is now a special case of scx_bpf_dsq_reenq() and may be removed in the
9044 * future.
9045 */
9046 __bpf_kfunc void scx_bpf_reenqueue_local___v2(const struct bpf_prog_aux *aux)
9047 {
9048 scx_bpf_dsq_reenq(SCX_DSQ_LOCAL, 0, aux);
9049 }
9050
9051 __bpf_kfunc_end_defs();
9052
9053 static s32 __bstr_format(struct scx_sched *sch, u64 *data_buf, char *line_buf,
9054 size_t line_size, char *fmt, unsigned long long *data,
9055 u32 data__sz)
9056 {
9057 struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
9058 s32 ret;
9059
9060 if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
9061 (data__sz && !data)) {
9062 scx_error(sch, "invalid data=%p and data__sz=%u", (void *)data, data__sz);
9063 return -EINVAL;
9064 }
9065
9066 ret = copy_from_kernel_nofault(data_buf, data, data__sz);
9067 if (ret < 0) {
9068 scx_error(sch, "failed to read data fields (%d)", ret);
9069 return ret;
9070 }
9071
9072 ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8,
9073 &bprintf_data);
9074 if (ret < 0) {
9075 scx_error(sch, "format preparation failed (%d)", ret);
9076 return ret;
9077 }
9078
9079 ret = bstr_printf(line_buf, line_size, fmt,
9080 bprintf_data.bin_args);
9081 bpf_bprintf_cleanup(&bprintf_data);
9082 if (ret < 0) {
9083 scx_error(sch, "(\"%s\", %p, %u) failed to format", fmt, data, data__sz);
9084 return ret;
9085 }
9086
9087 return ret;
9088 }
9089
9090 static s32 bstr_format(struct scx_sched *sch, struct scx_bstr_buf *buf,
9091 char *fmt, unsigned long long *data, u32 data__sz)
9092 {
9093 return __bstr_format(sch, buf->data, buf->line, sizeof(buf->line),
9094 fmt, data, data__sz);
9095 }
9096
9097 __bpf_kfunc_start_defs();
9098
9099 /**
9100 * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
9101 * @exit_code: Exit value to pass to user space via struct scx_exit_info.
9102 * @fmt: error message format string
9103 * @data: format string parameters packaged using ___bpf_fill() macro
9104 * @data__sz: @data len, must end in '__sz' for the verifier
9105 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9106 *
9107 * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops
9108 * disabling.
9109 */
9110 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
9111 unsigned long long *data, u32 data__sz,
9112 const struct bpf_prog_aux *aux)
9113 {
9114 struct scx_sched *sch;
9115 unsigned long flags;
9116
9117 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
9118 sch = scx_prog_sched(aux);
9119 if (likely(sch) &&
9120 bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
9121 scx_exit(sch, SCX_EXIT_UNREG_BPF, exit_code, "%s", scx_exit_bstr_buf.line);
9122 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
9123 }
9124
9125 /**
9126 * scx_bpf_error_bstr - Indicate fatal error
9127 * @fmt: error message format string
9128 * @data: format string parameters packaged using ___bpf_fill() macro
9129 * @data__sz: @data len, must end in '__sz' for the verifier
9130 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9131 *
9132 * Indicate that the BPF scheduler encountered a fatal error and initiate ops
9133 * disabling.
9134 */
9135 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
9136 u32 data__sz, const struct bpf_prog_aux *aux)
9137 {
9138 struct scx_sched *sch;
9139 unsigned long flags;
9140
9141 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
9142 sch = scx_prog_sched(aux);
9143 if (likely(sch) &&
9144 bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
9145 scx_exit(sch, SCX_EXIT_ERROR_BPF, 0, "%s", scx_exit_bstr_buf.line);
9146 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
9147 }
9148
9149 /**
9150 * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler
9151 * @fmt: format string
9152 * @data: format string parameters packaged using ___bpf_fill() macro
9153 * @data__sz: @data len, must end in '__sz' for the verifier
9154 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9155 *
9156 * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and
9157 * dump_task() to generate extra debug dump specific to the BPF scheduler.
9158 *
9159 * The extra dump may be multiple lines. A single line may be split over
9160 * multiple calls. The last line is automatically terminated.
9161 */
9162 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data,
9163 u32 data__sz, const struct bpf_prog_aux *aux)
9164 {
9165 struct scx_sched *sch;
9166 struct scx_dump_data *dd = &scx_dump_data;
9167 struct scx_bstr_buf *buf = &dd->buf;
9168 s32 ret;
9169
9170 guard(rcu)();
9171
9172 sch = scx_prog_sched(aux);
9173 if (unlikely(!sch))
9174 return;
9175
9176 if (raw_smp_processor_id() != dd->cpu) {
9177 scx_error(sch, "scx_bpf_dump() must only be called from ops.dump() and friends");
9178 return;
9179 }
9180
9181 /* append the formatted string to the line buf */
9182 ret = __bstr_format(sch, buf->data, buf->line + dd->cursor,
9183 sizeof(buf->line) - dd->cursor, fmt, data, data__sz);
9184 if (ret < 0) {
9185 dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)",
9186 dd->prefix, fmt, data, data__sz, ret);
9187 return;
9188 }
9189
9190 dd->cursor += ret;
9191 dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line));
9192
9193 if (!dd->cursor)
9194 return;
9195
9196 /*
9197 * If the line buf overflowed or ends in a newline, flush it into the
9198 * dump. This is to allow the caller to generate a single line over
9199 * multiple calls. As ops_dump_flush() can also handle multiple lines in
9200 * the line buf, the only case which can lead to an unexpected
9201 * truncation is when the caller keeps generating newlines in the middle
9202 * instead of the end consecutively. Don't do that.
9203 */
9204 if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n')
9205 ops_dump_flush();
9206 }
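
/*
 * Example (illustrative only): ops.dump() and friends normally reach this
 * kfunc through the scx_bpf_dump() convenience macro in the BPF-side headers,
 * which packs the varargs. my_nr_queued is a placeholder for scheduler state.
 *
 *	void BPF_STRUCT_OPS(example_dump, struct scx_dump_ctx *dctx)
 *	{
 *		scx_bpf_dump("nr_queued=%llu", my_nr_queued);
 *	}
 */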
9207
9208 /**
9209 * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU
9210 * @cpu: CPU of interest
9211 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9212 *
9213 * Return the maximum relative capacity of @cpu in relation to the most
9214 * performant CPU in the system. The return value is in the range [1,
9215 * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur().
9216 */
9217 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu, const struct bpf_prog_aux *aux)
9218 {
9219 struct scx_sched *sch;
9220
9221 guard(rcu)();
9222
9223 sch = scx_prog_sched(aux);
9224 if (likely(sch) && ops_cpu_valid(sch, cpu, NULL))
9225 return arch_scale_cpu_capacity(cpu);
9226 else
9227 return SCX_CPUPERF_ONE;
9228 }
9229
9230 /**
9231 * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU
9232 * @cpu: CPU of interest
9233 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9234 *
9235 * Return the current relative performance of @cpu in relation to its maximum.
9236 * The return value is in the range [1, %SCX_CPUPERF_ONE].
9237 *
9238 * The current performance level of a CPU in relation to the maximum performance
9239 * available in the system can be calculated as follows:
9240 *
9241 * scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE
9242 *
9243 * The result is in the range [1, %SCX_CPUPERF_ONE].
9244 */
9245 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu, const struct bpf_prog_aux *aux)
9246 {
9247 struct scx_sched *sch;
9248
9249 guard(rcu)();
9250
9251 sch = scx_prog_sched(aux);
9252 if (likely(sch) && ops_cpu_valid(sch, cpu, NULL))
9253 return arch_scale_freq_capacity(cpu);
9254 else
9255 return SCX_CPUPERF_ONE;
9256 }
9257
9258 /**
9259 * scx_bpf_cpuperf_set - Set the relative performance target of a CPU
9260 * @cpu: CPU of interest
9261 * @perf: target performance level [0, %SCX_CPUPERF_ONE]
9262 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9263 *
9264 * Set the target performance level of @cpu to @perf. @perf is in linear
9265 * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
9266 * schedutil cpufreq governor chooses the target frequency.
9267 *
9268 * The actual performance level chosen, CPU grouping, and the overhead and
9269 * latency of the operations are dependent on the hardware and cpufreq driver in
9270 * use. Consult hardware and cpufreq documentation for more information. The
9271 * current performance level can be monitored using scx_bpf_cpuperf_cur().
9272 */
9273 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf, const struct bpf_prog_aux *aux)
9274 {
9275 struct scx_sched *sch;
9276
9277 guard(rcu)();
9278
9279 sch = scx_prog_sched(aux);
9280 if (unlikely(!sch))
9281 return;
9282
9283 if (unlikely(perf > SCX_CPUPERF_ONE)) {
9284 scx_error(sch, "Invalid cpuperf target %u for CPU %d", perf, cpu);
9285 return;
9286 }
9287
9288 if (ops_cpu_valid(sch, cpu, NULL)) {
9289 struct rq *rq = cpu_rq(cpu), *locked_rq = scx_locked_rq();
9290 struct rq_flags rf;
9291
9292 /*
9293 * When called with an rq lock held, restrict the operation
9294 * to the corresponding CPU to prevent ABBA deadlocks.
9295 */
9296 if (locked_rq && rq != locked_rq) {
9297 scx_error(sch, "Invalid target CPU %d", cpu);
9298 return;
9299 }
9300
9301 /*
9302 * If no rq lock is held, allow to operate on any CPU by
9303 * acquiring the corresponding rq lock.
9304 */
9305 if (!locked_rq) {
9306 rq_lock_irqsave(rq, &rf);
9307 update_rq_clock(rq);
9308 }
9309
9310 rq->scx.cpuperf_target = perf;
9311 cpufreq_update_util(rq, 0);
9312
9313 if (!locked_rq)
9314 rq_unlock_irqrestore(rq, &rf);
9315 }
9316 }
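
/*
 * Example (illustrative only): combining the cpuperf kfuncs. The first two
 * lines compute the absolute performance currently delivered by @cpu using
 * the formula documented in scx_bpf_cpuperf_cur(); the last line requests
 * full speed, e.g. while latency-critical work is queued.
 *
 *	u32 cap = scx_bpf_cpuperf_cap(cpu);
 *	u32 cur = scx_bpf_cpuperf_cur(cpu);
 *	u64 perf_now = (u64)cap * cur / SCX_CPUPERF_ONE;
 *
 *	scx_bpf_cpuperf_set(cpu, SCX_CPUPERF_ONE);
 */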
9317
9318 /**
9319 * scx_bpf_nr_node_ids - Return the number of possible node IDs
9320 *
9321 * All valid node IDs in the system are smaller than the returned value.
9322 */
9323 __bpf_kfunc u32 scx_bpf_nr_node_ids(void)
9324 {
9325 return nr_node_ids;
9326 }
9327
9328 /**
9329 * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
9330 *
9331 * All valid CPU IDs in the system are smaller than the returned value.
9332 */
9333 __bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
9334 {
9335 return nr_cpu_ids;
9336 }
9337
9338 /**
9339 * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
9340 */
9341 __bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
9342 {
9343 return cpu_possible_mask;
9344 }
9345
9346 /**
9347 * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
9348 */
9349 __bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
9350 {
9351 return cpu_online_mask;
9352 }
9353
9354 /**
9355 * scx_bpf_put_cpumask - Release a possible/online cpumask
9356 * @cpumask: cpumask to release
9357 */
9358 __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
9359 {
9360 /*
9361 * Empty function body because we aren't actually acquiring or releasing
9362 * a reference to a global cpumask, which is read-only in the caller and
9363 * is never released. The acquire / release semantics here are just used
9364 	 * to make the cpumask a trusted pointer in the caller.
9365 */
9366 }
9367
9368 /**
9369 * scx_bpf_task_running - Is task currently running?
9370 * @p: task of interest
9371 */
9372 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
9373 {
9374 return task_rq(p)->curr == p;
9375 }
9376
9377 /**
9378 * scx_bpf_task_cpu - CPU a task is currently associated with
9379 * @p: task of interest
9380 */
9381 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
9382 {
9383 return task_cpu(p);
9384 }
9385
9386 /**
9387 * scx_bpf_cpu_rq - Fetch the rq of a CPU
9388 * @cpu: CPU of the rq
9389 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9390 */
9391 __bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu, const struct bpf_prog_aux *aux)
9392 {
9393 struct scx_sched *sch;
9394
9395 guard(rcu)();
9396
9397 sch = scx_prog_sched(aux);
9398 if (unlikely(!sch))
9399 return NULL;
9400
9401 if (!ops_cpu_valid(sch, cpu, NULL))
9402 return NULL;
9403
9404 if (!sch->warned_deprecated_rq) {
9405 printk_deferred(KERN_WARNING "sched_ext: %s() is deprecated; "
9406 "use scx_bpf_locked_rq() when holding rq lock "
9407 "or scx_bpf_cpu_curr() to read remote curr safely.\n", __func__);
9408 sch->warned_deprecated_rq = true;
9409 }
9410
9411 return cpu_rq(cpu);
9412 }
9413
9414 /**
9415 * scx_bpf_locked_rq - Return the rq currently locked by SCX
9416 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9417 *
9418 * Returns the rq if a rq lock is currently held by SCX.
9419 * Otherwise emits an error and returns NULL.
9420 */
9421 __bpf_kfunc struct rq *scx_bpf_locked_rq(const struct bpf_prog_aux *aux)
9422 {
9423 struct scx_sched *sch;
9424 struct rq *rq;
9425
9426 guard(preempt)();
9427
9428 sch = scx_prog_sched(aux);
9429 if (unlikely(!sch))
9430 return NULL;
9431
9432 rq = scx_locked_rq();
9433 if (!rq) {
9434 scx_error(sch, "accessing rq without holding rq lock");
9435 return NULL;
9436 }
9437
9438 return rq;
9439 }
9440
9441 /**
9442 * scx_bpf_cpu_curr - Return remote CPU's curr task
9443 * @cpu: CPU of interest
9444 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9445 *
9446 * Callers must hold RCU read lock (KF_RCU).
9447 */
9448 __bpf_kfunc struct task_struct *scx_bpf_cpu_curr(s32 cpu, const struct bpf_prog_aux *aux)
9449 {
9450 struct scx_sched *sch;
9451
9452 guard(rcu)();
9453
9454 sch = scx_prog_sched(aux);
9455 if (unlikely(!sch))
9456 return NULL;
9457
9458 if (!ops_cpu_valid(sch, cpu, NULL))
9459 return NULL;
9460
9461 return rcu_dereference(cpu_rq(cpu)->curr);
9462 }
9463
9464 /**
9465 * scx_bpf_now - Returns a high-performance monotonically non-decreasing
9466 * clock for the current CPU. The clock returned is in nanoseconds.
9467 *
9468 * It provides the following properties:
9469 *
9470 * 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently
9471 * to account for execution time and track tasks' runtime properties.
9472 * Unfortunately, in some hardware platforms, bpf_ktime_get_ns() -- which
9473 * eventually reads a hardware timestamp counter -- is neither performant nor
9474 * scalable. scx_bpf_now() aims to provide a high-performance clock by
9475 * using the rq clock in the scheduler core whenever possible.
9476 *
9477 * 2) High enough resolution for the BPF scheduler use cases: In most BPF
9478 * scheduler use cases, the required clock resolution is lower than the most
9479 * accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically
9480 * uses the rq clock in the scheduler core whenever it is valid. It considers
9481 * that the rq clock is valid from the time the rq clock is updated
9482 * (update_rq_clock) until the rq is unlocked (rq_unpin_lock).
9483 *
9484 * 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now()
9485  * guarantees the clock never goes backward when comparing values read on the
9486  * same CPU. On the other hand, when comparing clocks across different CPUs,
9487  * there is no such guarantee -- the clock can go backward. The clock is
9488  * monotonically *non-decreasing* rather than strictly increasing, so two
9489  * scx_bpf_now() calls on the same CPU may return the same value while the
9490  * rq clock remains valid.
9491 */
9492 __bpf_kfunc u64 scx_bpf_now(void)
9493 {
9494 struct rq *rq;
9495 u64 clock;
9496
9497 preempt_disable();
9498
9499 rq = this_rq();
9500 if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) {
9501 /*
9502 * If the rq clock is valid, use the cached rq clock.
9503 *
9504 * Note that scx_bpf_now() is re-entrant between a process
9505 * context and an interrupt context (e.g., timer interrupt).
9506 * However, we don't need to consider the race between them
9507 * because such race is not observable from a caller.
9508 */
9509 clock = READ_ONCE(rq->scx.clock);
9510 } else {
9511 /*
9512 * Otherwise, return a fresh rq clock.
9513 *
9514 * The rq clock is updated outside of the rq lock.
9515 * In this case, keep the updated rq clock invalid so the next
9516 * kfunc call outside the rq lock gets a fresh rq clock.
9517 */
9518 clock = sched_clock_cpu(cpu_of(rq));
9519 }
9520
9521 preempt_enable();
9522
9523 return clock;
9524 }
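
/*
 * Example (illustrative only): accounting how long a task ran using
 * scx_bpf_now() instead of bpf_ktime_get_ns(). taskc->started_at is a
 * placeholder for a timestamp the scheduler stashed, e.g. in task local
 * storage, when ops.running() was invoked.
 *
 *	void BPF_STRUCT_OPS(example_stopping, struct task_struct *p, bool runnable)
 *	{
 *		u64 ran = scx_bpf_now() - taskc->started_at;
 *	}
 */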
9525
9526 static void scx_read_events(struct scx_sched *sch, struct scx_event_stats *events)
9527 {
9528 struct scx_event_stats *e_cpu;
9529 int cpu;
9530
9531 /* Aggregate per-CPU event counters into @events. */
9532 memset(events, 0, sizeof(*events));
9533 for_each_possible_cpu(cpu) {
9534 e_cpu = &per_cpu_ptr(sch->pcpu, cpu)->event_stats;
9535 scx_agg_event(events, e_cpu, SCX_EV_SELECT_CPU_FALLBACK);
9536 scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
9537 scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_KEEP_LAST);
9538 scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_EXITING);
9539 scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
9540 scx_agg_event(events, e_cpu, SCX_EV_REENQ_IMMED);
9541 scx_agg_event(events, e_cpu, SCX_EV_REENQ_LOCAL_REPEAT);
9542 scx_agg_event(events, e_cpu, SCX_EV_REFILL_SLICE_DFL);
9543 scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DURATION);
9544 scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DISPATCH);
9545 scx_agg_event(events, e_cpu, SCX_EV_BYPASS_ACTIVATE);
9546 scx_agg_event(events, e_cpu, SCX_EV_INSERT_NOT_OWNED);
9547 scx_agg_event(events, e_cpu, SCX_EV_SUB_BYPASS_DISPATCH);
9548 }
9549 }
9550
9551 /*
9552  * scx_bpf_events - Get system-wide event counters
9553  * @events: output buffer from a BPF program
9554  * @events__sz: @events len, must end in '__sz' for the verifier
9555 */
9556 __bpf_kfunc void scx_bpf_events(struct scx_event_stats *events,
9557 size_t events__sz)
9558 {
9559 struct scx_sched *sch;
9560 struct scx_event_stats e_sys;
9561
9562 rcu_read_lock();
9563 sch = rcu_dereference(scx_root);
9564 if (sch)
9565 scx_read_events(sch, &e_sys);
9566 else
9567 memset(&e_sys, 0, sizeof(e_sys));
9568 rcu_read_unlock();
9569
9570 /*
9571 * We cannot entirely trust a BPF-provided size since a BPF program
9572 * might be compiled against a different vmlinux.h, of which
9573 * scx_event_stats would be larger (a newer vmlinux.h) or smaller
9574 * (an older vmlinux.h). Hence, we use the smaller size to avoid
9575 * memory corruption.
9576 */
9577 events__sz = min(events__sz, sizeof(*events));
9578 memcpy(events, &e_sys, events__sz);
9579 }
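
/*
 * Example (illustrative only): snapshot the aggregated counters from a BPF
 * program. Passing sizeof(events) lets the kernel clamp the copy when the
 * BPF-side struct layout differs from the kernel's.
 *
 *	struct scx_event_stats events;
 *
 *	scx_bpf_events(&events, sizeof(events));
 *	bpf_printk("select_cpu fallbacks: %llu",
 *		   events.SCX_EV_SELECT_CPU_FALLBACK);
 */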
9580
9581 #ifdef CONFIG_CGROUP_SCHED
9582 /**
9583 * scx_bpf_task_cgroup - Return the sched cgroup of a task
9584 * @p: task of interest
9585 * @aux: implicit BPF argument to access bpf_prog_aux hidden from BPF progs
9586 *
9587 * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
9588 * from the scheduler's POV. SCX operations should use this function to
9589 * determine @p's current cgroup as, unlike following @p->cgroups,
9590 * @p->sched_task_group is stable for the duration of the SCX op. See
9591 * SCX_CALL_OP_TASK() for details.
9592 */
9593 __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p,
9594 const struct bpf_prog_aux *aux)
9595 {
9596 struct task_group *tg = p->sched_task_group;
9597 struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
9598 struct scx_sched *sch;
9599
9600 guard(rcu)();
9601
9602 sch = scx_prog_sched(aux);
9603 if (unlikely(!sch))
9604 goto out;
9605
9606 if (!scx_kf_arg_task_ok(sch, p))
9607 goto out;
9608
9609 cgrp = tg_cgrp(tg);
9610
9611 out:
9612 cgroup_get(cgrp);
9613 return cgrp;
9614 }
9615 #endif /* CONFIG_CGROUP_SCHED */
9616
9617 __bpf_kfunc_end_defs();
9618
9619 BTF_KFUNCS_START(scx_kfunc_ids_any)
9620 BTF_ID_FLAGS(func, scx_bpf_task_set_slice, KF_IMPLICIT_ARGS | KF_RCU)
9621 BTF_ID_FLAGS(func, scx_bpf_task_set_dsq_vtime, KF_IMPLICIT_ARGS | KF_RCU)
9622 BTF_ID_FLAGS(func, scx_bpf_kick_cpu, KF_IMPLICIT_ARGS)
9623 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued, KF_IMPLICIT_ARGS)
9624 BTF_ID_FLAGS(func, scx_bpf_destroy_dsq, KF_IMPLICIT_ARGS)
9625 BTF_ID_FLAGS(func, scx_bpf_dsq_peek, KF_IMPLICIT_ARGS | KF_RCU_PROTECTED | KF_RET_NULL)
9626 BTF_ID_FLAGS(func, scx_bpf_dsq_reenq, KF_IMPLICIT_ARGS)
9627 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local___v2, KF_IMPLICIT_ARGS)
9628 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_IMPLICIT_ARGS | KF_ITER_NEW | KF_RCU_PROTECTED)
9629 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
9630 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
9631 BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_IMPLICIT_ARGS)
9632 BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_IMPLICIT_ARGS)
9633 BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_IMPLICIT_ARGS)
9634 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap, KF_IMPLICIT_ARGS)
9635 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur, KF_IMPLICIT_ARGS)
9636 BTF_ID_FLAGS(func, scx_bpf_cpuperf_set, KF_IMPLICIT_ARGS)
9637 BTF_ID_FLAGS(func, scx_bpf_nr_node_ids)
9638 BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
9639 BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
9640 BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
9641 BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
9642 BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
9643 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
9644 BTF_ID_FLAGS(func, scx_bpf_cpu_rq, KF_IMPLICIT_ARGS)
9645 BTF_ID_FLAGS(func, scx_bpf_locked_rq, KF_IMPLICIT_ARGS | KF_RET_NULL)
9646 BTF_ID_FLAGS(func, scx_bpf_cpu_curr, KF_IMPLICIT_ARGS | KF_RET_NULL | KF_RCU_PROTECTED)
9647 BTF_ID_FLAGS(func, scx_bpf_now)
9648 BTF_ID_FLAGS(func, scx_bpf_events)
9649 #ifdef CONFIG_CGROUP_SCHED
9650 BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_IMPLICIT_ARGS | KF_RCU | KF_ACQUIRE)
9651 #endif
9652 BTF_KFUNCS_END(scx_kfunc_ids_any)
9653
9654 static const struct btf_kfunc_id_set scx_kfunc_set_any = {
9655 .owner = THIS_MODULE,
9656 .set = &scx_kfunc_ids_any,
9657 .filter = scx_kfunc_context_filter,
9658 };
9659
9660 /*
9661 * Per-op kfunc allow flags. Each bit corresponds to a context-sensitive kfunc
9662 * group; an op may permit zero or more groups, with the union expressed in
9663 * scx_kf_allow_flags[]. The verifier-time filter (scx_kfunc_context_filter())
9664 * consults this table to decide whether a context-sensitive kfunc is callable
9665 * from a given SCX op.
9666 */
9667 enum scx_kf_allow_flags {
9668 SCX_KF_ALLOW_UNLOCKED = 1 << 0,
9669 SCX_KF_ALLOW_CPU_RELEASE = 1 << 1,
9670 SCX_KF_ALLOW_DISPATCH = 1 << 2,
9671 SCX_KF_ALLOW_ENQUEUE = 1 << 3,
9672 SCX_KF_ALLOW_SELECT_CPU = 1 << 4,
9673 };
9674
9675 /*
9676 * Map each SCX op to the union of kfunc groups it permits, indexed by
9677 * SCX_OP_IDX(op). Ops not listed only permit kfuncs that are not
9678 * context-sensitive.
9679 */
9680 static const u32 scx_kf_allow_flags[] = {
9681 [SCX_OP_IDX(select_cpu)] = SCX_KF_ALLOW_SELECT_CPU | SCX_KF_ALLOW_ENQUEUE,
9682 [SCX_OP_IDX(enqueue)] = SCX_KF_ALLOW_SELECT_CPU | SCX_KF_ALLOW_ENQUEUE,
9683 [SCX_OP_IDX(dispatch)] = SCX_KF_ALLOW_ENQUEUE | SCX_KF_ALLOW_DISPATCH,
9684 [SCX_OP_IDX(cpu_release)] = SCX_KF_ALLOW_CPU_RELEASE,
9685 [SCX_OP_IDX(init_task)] = SCX_KF_ALLOW_UNLOCKED,
9686 [SCX_OP_IDX(dump)] = SCX_KF_ALLOW_UNLOCKED,
9687 #ifdef CONFIG_EXT_GROUP_SCHED
9688 [SCX_OP_IDX(cgroup_init)] = SCX_KF_ALLOW_UNLOCKED,
9689 [SCX_OP_IDX(cgroup_exit)] = SCX_KF_ALLOW_UNLOCKED,
9690 [SCX_OP_IDX(cgroup_prep_move)] = SCX_KF_ALLOW_UNLOCKED,
9691 [SCX_OP_IDX(cgroup_cancel_move)] = SCX_KF_ALLOW_UNLOCKED,
9692 [SCX_OP_IDX(cgroup_set_weight)] = SCX_KF_ALLOW_UNLOCKED,
9693 [SCX_OP_IDX(cgroup_set_bandwidth)] = SCX_KF_ALLOW_UNLOCKED,
9694 [SCX_OP_IDX(cgroup_set_idle)] = SCX_KF_ALLOW_UNLOCKED,
9695 #endif /* CONFIG_EXT_GROUP_SCHED */
9696 [SCX_OP_IDX(sub_attach)] = SCX_KF_ALLOW_UNLOCKED,
9697 [SCX_OP_IDX(sub_detach)] = SCX_KF_ALLOW_UNLOCKED,
9698 [SCX_OP_IDX(cpu_online)] = SCX_KF_ALLOW_UNLOCKED,
9699 [SCX_OP_IDX(cpu_offline)] = SCX_KF_ALLOW_UNLOCKED,
9700 [SCX_OP_IDX(init)] = SCX_KF_ALLOW_UNLOCKED,
9701 [SCX_OP_IDX(exit)] = SCX_KF_ALLOW_UNLOCKED,
9702 };
9703
9704 /*
9705 * Verifier-time filter for SCX kfuncs. Registered via the .filter field on
9706 * each per-group btf_kfunc_id_set. The BPF core invokes this for every kfunc
9707 * call in the registered hook (BPF_PROG_TYPE_STRUCT_OPS or
9708 * BPF_PROG_TYPE_SYSCALL), regardless of which set originally introduced the
9709 * kfunc - so the filter must short-circuit on kfuncs it doesn't govern by
9710 * falling through to "allow" when none of the SCX sets contain the kfunc.
9711 */
9712 int scx_kfunc_context_filter(const struct bpf_prog *prog, u32 kfunc_id)
9713 {
9714 bool in_unlocked = btf_id_set8_contains(&scx_kfunc_ids_unlocked, kfunc_id);
9715 bool in_select_cpu = btf_id_set8_contains(&scx_kfunc_ids_select_cpu, kfunc_id);
9716 bool in_enqueue = btf_id_set8_contains(&scx_kfunc_ids_enqueue_dispatch, kfunc_id);
9717 bool in_dispatch = btf_id_set8_contains(&scx_kfunc_ids_dispatch, kfunc_id);
9718 bool in_cpu_release = btf_id_set8_contains(&scx_kfunc_ids_cpu_release, kfunc_id);
9719 bool in_idle = btf_id_set8_contains(&scx_kfunc_ids_idle, kfunc_id);
9720 bool in_any = btf_id_set8_contains(&scx_kfunc_ids_any, kfunc_id);
9721 u32 moff, flags;
9722
9723 /* Not an SCX kfunc - allow. */
9724 if (!(in_unlocked || in_select_cpu || in_enqueue || in_dispatch ||
9725 in_cpu_release || in_idle || in_any))
9726 return 0;
9727
9728 /* SYSCALL progs (e.g. BPF test_run()) may call unlocked and select_cpu kfuncs. */
9729 if (prog->type == BPF_PROG_TYPE_SYSCALL)
9730 return (in_unlocked || in_select_cpu || in_idle || in_any) ? 0 : -EACCES;
9731
9732 if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
9733 return (in_any || in_idle) ? 0 : -EACCES;
9734
9735 /*
9736 * add_subprog_and_kfunc() collects all kfunc calls, including dead code
9737 * guarded by bpf_ksym_exists(), before check_attach_btf_id() sets
9738 * prog->aux->st_ops. Allow all kfuncs when st_ops is not yet set;
9739 * do_check_main() re-runs the filter with st_ops set and enforces the
9740 * actual restrictions.
9741 */
9742 if (!prog->aux->st_ops)
9743 return 0;
9744
9745 /*
9746 * Non-SCX struct_ops: SCX kfuncs are not permitted.
9747 */
9748 if (prog->aux->st_ops != &bpf_sched_ext_ops)
9749 return -EACCES;
9750
9751 /* SCX struct_ops: check the per-op allow list. */
9752 if (in_any || in_idle)
9753 return 0;
9754
9755 moff = prog->aux->attach_st_ops_member_off;
9756 flags = scx_kf_allow_flags[SCX_MOFF_IDX(moff)];
9757
9758 if ((flags & SCX_KF_ALLOW_UNLOCKED) && in_unlocked)
9759 return 0;
9760 if ((flags & SCX_KF_ALLOW_CPU_RELEASE) && in_cpu_release)
9761 return 0;
9762 if ((flags & SCX_KF_ALLOW_DISPATCH) && in_dispatch)
9763 return 0;
9764 if ((flags & SCX_KF_ALLOW_ENQUEUE) && in_enqueue)
9765 return 0;
9766 if ((flags & SCX_KF_ALLOW_SELECT_CPU) && in_select_cpu)
9767 return 0;
9768
9769 return -EACCES;
9770 }
9771
9772 static int __init scx_init(void)
9773 {
9774 int ret;
9775
9776 /*
9777 * kfunc registration can't be done from init_sched_ext_class() as
9778 * register_btf_kfunc_id_set() needs most of the system to be up.
9779 *
9780 * Some kfuncs are context-sensitive and can only be called from
9781 * specific SCX ops. They are grouped into per-context BTF sets, each
9782 * registered with scx_kfunc_context_filter as its .filter callback. The
9783 * BPF core dedups identical filter pointers per hook
9784 * (btf_populate_kfunc_set()), so the filter is invoked exactly once per
9785 * kfunc lookup; it consults scx_kf_allow_flags[] to enforce per-op
9786 * restrictions at verify time.
9787 */
9788 if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
9789 &scx_kfunc_set_enqueue_dispatch)) ||
9790 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
9791 &scx_kfunc_set_dispatch)) ||
9792 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
9793 &scx_kfunc_set_cpu_release)) ||
9794 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
9795 &scx_kfunc_set_unlocked)) ||
9796 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
9797 &scx_kfunc_set_unlocked)) ||
9798 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
9799 &scx_kfunc_set_any)) ||
9800 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
9801 &scx_kfunc_set_any)) ||
9802 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
9803 &scx_kfunc_set_any))) {
9804 pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
9805 return ret;
9806 }
9807
9808 ret = scx_idle_init();
9809 if (ret) {
9810 pr_err("sched_ext: Failed to initialize idle tracking (%d)\n", ret);
9811 return ret;
9812 }
9813
9814 ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
9815 if (ret) {
9816 pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
9817 return ret;
9818 }
9819
9820 ret = register_pm_notifier(&scx_pm_notifier);
9821 if (ret) {
9822 pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
9823 return ret;
9824 }
9825
9826 scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
9827 if (!scx_kset) {
9828 pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
9829 return -ENOMEM;
9830 }
9831
9832 ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
9833 if (ret < 0) {
9834 pr_err("sched_ext: Failed to add global attributes\n");
9835 return ret;
9836 }
9837
9838 return 0;
9839 }
9840 __initcall(scx_init);
9841