1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4 *
5 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
6 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
7 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
8 */
9 #include <linux/btf_ids.h>
10 #include "ext_idle.h"
11
12 /*
13 * NOTE: sched_ext is in the process of growing multiple scheduler support and
14 * scx_root usage is in a transitional state. Naked dereferences are safe if the
15 * caller is one of the tasks attached to SCX and explicit RCU dereference is
16 * necessary otherwise. Naked scx_root dereferences trigger sparse warnings but
17 * are used as temporary markers to indicate that the dereferences need to be
18 * updated to point to the associated scheduler instances rather than scx_root.
19 */
20 static struct scx_sched __rcu *scx_root;
21
22 /*
23 * During exit, a task may schedule after losing its PIDs. When disabling the
24 * BPF scheduler, we need to be able to iterate tasks in every state to
25 * guarantee system safety. Maintain a dedicated task list which contains every
26 * task between its fork and eventual free.
27 */
28 static DEFINE_RAW_SPINLOCK(scx_tasks_lock);
29 static LIST_HEAD(scx_tasks);
30
31 /* ops enable/disable */
32 static DEFINE_MUTEX(scx_enable_mutex);
33 DEFINE_STATIC_KEY_FALSE(__scx_enabled);
34 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
35 static atomic_t scx_enable_state_var = ATOMIC_INIT(SCX_DISABLED);
36 static int scx_bypass_depth;
37 static cpumask_var_t scx_bypass_lb_donee_cpumask;
38 static cpumask_var_t scx_bypass_lb_resched_cpumask;
39 static bool scx_aborting;
40 static bool scx_init_task_enabled;
41 static bool scx_switching_all;
42 DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
43
44 /*
45 * Tracks whether scx_enable() called scx_bypass(true). Used to balance bypass
46 * depth on enable failure. Will be removed when bypass depth is moved into the
47 * sched instance.
48 */
49 static bool scx_bypassed_for_enable;
50
51 static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
52 static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
53
54 /*
55 * A monotically increasing sequence number that is incremented every time a
56 * scheduler is enabled. This can be used by to check if any custom sched_ext
57 * scheduler has ever been used in the system.
58 */
59 static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);
60
61 /*
62 * The maximum amount of time in jiffies that a task may be runnable without
63 * being scheduled on a CPU. If this timeout is exceeded, it will trigger
64 * scx_error().
65 */
66 static unsigned long scx_watchdog_timeout;
67
68 /*
69 * The last time the delayed work was run. This delayed work relies on
70 * ksoftirqd being able to run to service timer interrupts, so it's possible
71 * that this work itself could get wedged. To account for this, we check that
72 * it's not stalled in the timer tick, and trigger an error if it is.
73 */
74 static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
75
76 static struct delayed_work scx_watchdog_work;
77
78 /*
79 * For %SCX_KICK_WAIT: Each CPU has a pointer to an array of kick_sync sequence
80 * numbers. The arrays are allocated with kvzalloc() as size can exceed percpu
81 * allocator limits on large machines. O(nr_cpu_ids^2) allocation, allocated
82 * lazily when enabling and freed when disabling to avoid waste when sched_ext
83 * isn't active.
84 */
85 struct scx_kick_syncs {
86 struct rcu_head rcu;
87 unsigned long syncs[];
88 };
89
90 static DEFINE_PER_CPU(struct scx_kick_syncs __rcu *, scx_kick_syncs);
91
92 /*
93 * Direct dispatch marker.
94 *
95 * Non-NULL values are used for direct dispatch from enqueue path. A valid
96 * pointer points to the task currently being enqueued. An ERR_PTR value is used
97 * to indicate that direct dispatch has already happened.
98 */
99 static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
100
101 static const struct rhashtable_params dsq_hash_params = {
102 .key_len = sizeof_field(struct scx_dispatch_q, id),
103 .key_offset = offsetof(struct scx_dispatch_q, id),
104 .head_offset = offsetof(struct scx_dispatch_q, hash_node),
105 };
106
107 static LLIST_HEAD(dsqs_to_free);
108
109 /* dispatch buf */
110 struct scx_dsp_buf_ent {
111 struct task_struct *task;
112 unsigned long qseq;
113 u64 dsq_id;
114 u64 enq_flags;
115 };
116
117 static u32 scx_dsp_max_batch;
118
119 struct scx_dsp_ctx {
120 struct rq *rq;
121 u32 cursor;
122 u32 nr_tasks;
123 struct scx_dsp_buf_ent buf[];
124 };
125
126 static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
127
128 /* string formatting from BPF */
129 struct scx_bstr_buf {
130 u64 data[MAX_BPRINTF_VARARGS];
131 char line[SCX_EXIT_MSG_LEN];
132 };
133
134 static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
135 static struct scx_bstr_buf scx_exit_bstr_buf;
136
137 /* ops debug dump */
138 struct scx_dump_data {
139 s32 cpu;
140 bool first;
141 s32 cursor;
142 struct seq_buf *s;
143 const char *prefix;
144 struct scx_bstr_buf buf;
145 };
146
147 static struct scx_dump_data scx_dump_data = {
148 .cpu = -1,
149 };
150
151 /* /sys/kernel/sched_ext interface */
152 static struct kset *scx_kset;
153
154 /*
155 * Parameters that can be adjusted through /sys/module/sched_ext/parameters.
156 * There usually is no reason to modify these as normal scheduler operation
157 * shouldn't be affected by them. The knobs are primarily for debugging.
158 */
159 static u64 scx_slice_dfl = SCX_SLICE_DFL;
160 static unsigned int scx_slice_bypass_us = SCX_SLICE_BYPASS / NSEC_PER_USEC;
161 static unsigned int scx_bypass_lb_intv_us = SCX_BYPASS_LB_DFL_INTV_US;
162
set_slice_us(const char * val,const struct kernel_param * kp)163 static int set_slice_us(const char *val, const struct kernel_param *kp)
164 {
165 return param_set_uint_minmax(val, kp, 100, 100 * USEC_PER_MSEC);
166 }
167
168 static const struct kernel_param_ops slice_us_param_ops = {
169 .set = set_slice_us,
170 .get = param_get_uint,
171 };
172
set_bypass_lb_intv_us(const char * val,const struct kernel_param * kp)173 static int set_bypass_lb_intv_us(const char *val, const struct kernel_param *kp)
174 {
175 return param_set_uint_minmax(val, kp, 0, 10 * USEC_PER_SEC);
176 }
177
178 static const struct kernel_param_ops bypass_lb_intv_us_param_ops = {
179 .set = set_bypass_lb_intv_us,
180 .get = param_get_uint,
181 };
182
183 #undef MODULE_PARAM_PREFIX
184 #define MODULE_PARAM_PREFIX "sched_ext."
185
186 module_param_cb(slice_bypass_us, &slice_us_param_ops, &scx_slice_bypass_us, 0600);
187 MODULE_PARM_DESC(slice_bypass_us, "bypass slice in microseconds, applied on [un]load (100us to 100ms)");
188 module_param_cb(bypass_lb_intv_us, &bypass_lb_intv_us_param_ops, &scx_bypass_lb_intv_us, 0600);
189 MODULE_PARM_DESC(bypass_lb_intv_us, "bypass load balance interval in microseconds (0 (disable) to 10s)");
190
191 #undef MODULE_PARAM_PREFIX
192
193 #define CREATE_TRACE_POINTS
194 #include <trace/events/sched_ext.h>
195
196 static void process_ddsp_deferred_locals(struct rq *rq);
197 static bool task_dead_and_done(struct task_struct *p);
198 static u32 reenq_local(struct rq *rq);
199 static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags);
200 static bool scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind,
201 s64 exit_code, const char *fmt, va_list args);
202
scx_exit(struct scx_sched * sch,enum scx_exit_kind kind,s64 exit_code,const char * fmt,...)203 static __printf(4, 5) bool scx_exit(struct scx_sched *sch,
204 enum scx_exit_kind kind, s64 exit_code,
205 const char *fmt, ...)
206 {
207 va_list args;
208 bool ret;
209
210 va_start(args, fmt);
211 ret = scx_vexit(sch, kind, exit_code, fmt, args);
212 va_end(args);
213
214 return ret;
215 }
216
217 #define scx_error(sch, fmt, args...) scx_exit((sch), SCX_EXIT_ERROR, 0, fmt, ##args)
218 #define scx_verror(sch, fmt, args) scx_vexit((sch), SCX_EXIT_ERROR, 0, fmt, args)
219
220 #define SCX_HAS_OP(sch, op) test_bit(SCX_OP_IDX(op), (sch)->has_op)
221
jiffies_delta_msecs(unsigned long at,unsigned long now)222 static long jiffies_delta_msecs(unsigned long at, unsigned long now)
223 {
224 if (time_after(at, now))
225 return jiffies_to_msecs(at - now);
226 else
227 return -(long)jiffies_to_msecs(now - at);
228 }
229
230 /* if the highest set bit is N, return a mask with bits [N+1, 31] set */
higher_bits(u32 flags)231 static u32 higher_bits(u32 flags)
232 {
233 return ~((1 << fls(flags)) - 1);
234 }
235
236 /* return the mask with only the highest bit set */
highest_bit(u32 flags)237 static u32 highest_bit(u32 flags)
238 {
239 int bit = fls(flags);
240 return ((u64)1 << bit) >> 1;
241 }
242
u32_before(u32 a,u32 b)243 static bool u32_before(u32 a, u32 b)
244 {
245 return (s32)(a - b) < 0;
246 }
247
find_global_dsq(struct scx_sched * sch,struct task_struct * p)248 static struct scx_dispatch_q *find_global_dsq(struct scx_sched *sch,
249 struct task_struct *p)
250 {
251 return sch->global_dsqs[cpu_to_node(task_cpu(p))];
252 }
253
find_user_dsq(struct scx_sched * sch,u64 dsq_id)254 static struct scx_dispatch_q *find_user_dsq(struct scx_sched *sch, u64 dsq_id)
255 {
256 return rhashtable_lookup(&sch->dsq_hash, &dsq_id, dsq_hash_params);
257 }
258
scx_setscheduler_class(struct task_struct * p)259 static const struct sched_class *scx_setscheduler_class(struct task_struct *p)
260 {
261 if (p->sched_class == &stop_sched_class)
262 return &stop_sched_class;
263
264 return __setscheduler_class(p->policy, p->prio);
265 }
266
267 /*
268 * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
269 * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
270 * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check
271 * whether it's running from an allowed context.
272 *
273 * @mask is constant, always inline to cull the mask calculations.
274 */
scx_kf_allow(u32 mask)275 static __always_inline void scx_kf_allow(u32 mask)
276 {
277 /* nesting is allowed only in increasing scx_kf_mask order */
278 WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
279 "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
280 current->scx.kf_mask, mask);
281 current->scx.kf_mask |= mask;
282 barrier();
283 }
284
scx_kf_disallow(u32 mask)285 static void scx_kf_disallow(u32 mask)
286 {
287 barrier();
288 current->scx.kf_mask &= ~mask;
289 }
290
291 /*
292 * Track the rq currently locked.
293 *
294 * This allows kfuncs to safely operate on rq from any scx ops callback,
295 * knowing which rq is already locked.
296 */
297 DEFINE_PER_CPU(struct rq *, scx_locked_rq_state);
298
update_locked_rq(struct rq * rq)299 static inline void update_locked_rq(struct rq *rq)
300 {
301 /*
302 * Check whether @rq is actually locked. This can help expose bugs
303 * or incorrect assumptions about the context in which a kfunc or
304 * callback is executed.
305 */
306 if (rq)
307 lockdep_assert_rq_held(rq);
308 __this_cpu_write(scx_locked_rq_state, rq);
309 }
310
311 #define SCX_CALL_OP(sch, mask, op, rq, args...) \
312 do { \
313 if (rq) \
314 update_locked_rq(rq); \
315 if (mask) { \
316 scx_kf_allow(mask); \
317 (sch)->ops.op(args); \
318 scx_kf_disallow(mask); \
319 } else { \
320 (sch)->ops.op(args); \
321 } \
322 if (rq) \
323 update_locked_rq(NULL); \
324 } while (0)
325
326 #define SCX_CALL_OP_RET(sch, mask, op, rq, args...) \
327 ({ \
328 __typeof__((sch)->ops.op(args)) __ret; \
329 \
330 if (rq) \
331 update_locked_rq(rq); \
332 if (mask) { \
333 scx_kf_allow(mask); \
334 __ret = (sch)->ops.op(args); \
335 scx_kf_disallow(mask); \
336 } else { \
337 __ret = (sch)->ops.op(args); \
338 } \
339 if (rq) \
340 update_locked_rq(NULL); \
341 __ret; \
342 })
343
344 /*
345 * Some kfuncs are allowed only on the tasks that are subjects of the
346 * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such
347 * restrictions, the following SCX_CALL_OP_*() variants should be used when
348 * invoking scx_ops operations that take task arguments. These can only be used
349 * for non-nesting operations due to the way the tasks are tracked.
350 *
351 * kfuncs which can only operate on such tasks can in turn use
352 * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
353 * the specific task.
354 */
355 #define SCX_CALL_OP_TASK(sch, mask, op, rq, task, args...) \
356 do { \
357 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
358 current->scx.kf_tasks[0] = task; \
359 SCX_CALL_OP((sch), mask, op, rq, task, ##args); \
360 current->scx.kf_tasks[0] = NULL; \
361 } while (0)
362
363 #define SCX_CALL_OP_TASK_RET(sch, mask, op, rq, task, args...) \
364 ({ \
365 __typeof__((sch)->ops.op(task, ##args)) __ret; \
366 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
367 current->scx.kf_tasks[0] = task; \
368 __ret = SCX_CALL_OP_RET((sch), mask, op, rq, task, ##args); \
369 current->scx.kf_tasks[0] = NULL; \
370 __ret; \
371 })
372
373 #define SCX_CALL_OP_2TASKS_RET(sch, mask, op, rq, task0, task1, args...) \
374 ({ \
375 __typeof__((sch)->ops.op(task0, task1, ##args)) __ret; \
376 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \
377 current->scx.kf_tasks[0] = task0; \
378 current->scx.kf_tasks[1] = task1; \
379 __ret = SCX_CALL_OP_RET((sch), mask, op, rq, task0, task1, ##args); \
380 current->scx.kf_tasks[0] = NULL; \
381 current->scx.kf_tasks[1] = NULL; \
382 __ret; \
383 })
384
385 /* @mask is constant, always inline to cull unnecessary branches */
scx_kf_allowed(struct scx_sched * sch,u32 mask)386 static __always_inline bool scx_kf_allowed(struct scx_sched *sch, u32 mask)
387 {
388 if (unlikely(!(current->scx.kf_mask & mask))) {
389 scx_error(sch, "kfunc with mask 0x%x called from an operation only allowing 0x%x",
390 mask, current->scx.kf_mask);
391 return false;
392 }
393
394 /*
395 * Enforce nesting boundaries. e.g. A kfunc which can be called from
396 * DISPATCH must not be called if we're running DEQUEUE which is nested
397 * inside ops.dispatch(). We don't need to check boundaries for any
398 * blocking kfuncs as the verifier ensures they're only called from
399 * sleepable progs.
400 */
401 if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
402 (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
403 scx_error(sch, "cpu_release kfunc called from a nested operation");
404 return false;
405 }
406
407 if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
408 (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
409 scx_error(sch, "dispatch kfunc called from a nested operation");
410 return false;
411 }
412
413 return true;
414 }
415
416 /* see SCX_CALL_OP_TASK() */
scx_kf_allowed_on_arg_tasks(struct scx_sched * sch,u32 mask,struct task_struct * p)417 static __always_inline bool scx_kf_allowed_on_arg_tasks(struct scx_sched *sch,
418 u32 mask,
419 struct task_struct *p)
420 {
421 if (!scx_kf_allowed(sch, mask))
422 return false;
423
424 if (unlikely((p != current->scx.kf_tasks[0] &&
425 p != current->scx.kf_tasks[1]))) {
426 scx_error(sch, "called on a task not being operated on");
427 return false;
428 }
429
430 return true;
431 }
432
433 /**
434 * nldsq_next_task - Iterate to the next task in a non-local DSQ
435 * @dsq: user dsq being iterated
436 * @cur: current position, %NULL to start iteration
437 * @rev: walk backwards
438 *
439 * Returns %NULL when iteration is finished.
440 */
nldsq_next_task(struct scx_dispatch_q * dsq,struct task_struct * cur,bool rev)441 static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
442 struct task_struct *cur, bool rev)
443 {
444 struct list_head *list_node;
445 struct scx_dsq_list_node *dsq_lnode;
446
447 lockdep_assert_held(&dsq->lock);
448
449 if (cur)
450 list_node = &cur->scx.dsq_list.node;
451 else
452 list_node = &dsq->list;
453
454 /* find the next task, need to skip BPF iteration cursors */
455 do {
456 if (rev)
457 list_node = list_node->prev;
458 else
459 list_node = list_node->next;
460
461 if (list_node == &dsq->list)
462 return NULL;
463
464 dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
465 node);
466 } while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);
467
468 return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
469 }
470
471 #define nldsq_for_each_task(p, dsq) \
472 for ((p) = nldsq_next_task((dsq), NULL, false); (p); \
473 (p) = nldsq_next_task((dsq), (p), false))
474
475
476 /*
477 * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
478 * dispatch order. BPF-visible iterator is opaque and larger to allow future
479 * changes without breaking backward compatibility. Can be used with
480 * bpf_for_each(). See bpf_iter_scx_dsq_*().
481 */
482 enum scx_dsq_iter_flags {
483 /* iterate in the reverse dispatch order */
484 SCX_DSQ_ITER_REV = 1U << 16,
485
486 __SCX_DSQ_ITER_HAS_SLICE = 1U << 30,
487 __SCX_DSQ_ITER_HAS_VTIME = 1U << 31,
488
489 __SCX_DSQ_ITER_USER_FLAGS = SCX_DSQ_ITER_REV,
490 __SCX_DSQ_ITER_ALL_FLAGS = __SCX_DSQ_ITER_USER_FLAGS |
491 __SCX_DSQ_ITER_HAS_SLICE |
492 __SCX_DSQ_ITER_HAS_VTIME,
493 };
494
495 struct bpf_iter_scx_dsq_kern {
496 struct scx_dsq_list_node cursor;
497 struct scx_dispatch_q *dsq;
498 u64 slice;
499 u64 vtime;
500 } __attribute__((aligned(8)));
501
502 struct bpf_iter_scx_dsq {
503 u64 __opaque[6];
504 } __attribute__((aligned(8)));
505
506
507 /*
508 * SCX task iterator.
509 */
510 struct scx_task_iter {
511 struct sched_ext_entity cursor;
512 struct task_struct *locked_task;
513 struct rq *rq;
514 struct rq_flags rf;
515 u32 cnt;
516 bool list_locked;
517 };
518
519 /**
520 * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration
521 * @iter: iterator to init
522 *
523 * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter
524 * must eventually be stopped with scx_task_iter_stop().
525 *
526 * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock()
527 * between this and the first next() call or between any two next() calls. If
528 * the locks are released between two next() calls, the caller is responsible
529 * for ensuring that the task being iterated remains accessible either through
530 * RCU read lock or obtaining a reference count.
531 *
532 * All tasks which existed when the iteration started are guaranteed to be
533 * visited as long as they are not dead.
534 */
scx_task_iter_start(struct scx_task_iter * iter)535 static void scx_task_iter_start(struct scx_task_iter *iter)
536 {
537 memset(iter, 0, sizeof(*iter));
538
539 raw_spin_lock_irq(&scx_tasks_lock);
540
541 iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
542 list_add(&iter->cursor.tasks_node, &scx_tasks);
543 iter->list_locked = true;
544 }
545
__scx_task_iter_rq_unlock(struct scx_task_iter * iter)546 static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
547 {
548 if (iter->locked_task) {
549 __balance_callbacks(iter->rq, &iter->rf);
550 task_rq_unlock(iter->rq, iter->locked_task, &iter->rf);
551 iter->locked_task = NULL;
552 }
553 }
554
555 /**
556 * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator
557 * @iter: iterator to unlock
558 *
559 * If @iter is in the middle of a locked iteration, it may be locking the rq of
560 * the task currently being visited in addition to scx_tasks_lock. Unlock both.
561 * This function can be safely called anytime during an iteration. The next
562 * iterator operation will automatically restore the necessary locking.
563 */
scx_task_iter_unlock(struct scx_task_iter * iter)564 static void scx_task_iter_unlock(struct scx_task_iter *iter)
565 {
566 __scx_task_iter_rq_unlock(iter);
567 if (iter->list_locked) {
568 iter->list_locked = false;
569 raw_spin_unlock_irq(&scx_tasks_lock);
570 }
571 }
572
__scx_task_iter_maybe_relock(struct scx_task_iter * iter)573 static void __scx_task_iter_maybe_relock(struct scx_task_iter *iter)
574 {
575 if (!iter->list_locked) {
576 raw_spin_lock_irq(&scx_tasks_lock);
577 iter->list_locked = true;
578 }
579 }
580
581 /**
582 * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
583 * @iter: iterator to exit
584 *
585 * Exit a previously initialized @iter. Must be called with scx_tasks_lock held
586 * which is released on return. If the iterator holds a task's rq lock, that rq
587 * lock is also released. See scx_task_iter_start() for details.
588 */
scx_task_iter_stop(struct scx_task_iter * iter)589 static void scx_task_iter_stop(struct scx_task_iter *iter)
590 {
591 __scx_task_iter_maybe_relock(iter);
592 list_del_init(&iter->cursor.tasks_node);
593 scx_task_iter_unlock(iter);
594 }
595
596 /**
597 * scx_task_iter_next - Next task
598 * @iter: iterator to walk
599 *
600 * Visit the next task. See scx_task_iter_start() for details. Locks are dropped
601 * and re-acquired every %SCX_TASK_ITER_BATCH iterations to avoid causing stalls
602 * by holding scx_tasks_lock for too long.
603 */
scx_task_iter_next(struct scx_task_iter * iter)604 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
605 {
606 struct list_head *cursor = &iter->cursor.tasks_node;
607 struct sched_ext_entity *pos;
608
609 if (!(++iter->cnt % SCX_TASK_ITER_BATCH)) {
610 scx_task_iter_unlock(iter);
611 cond_resched();
612 }
613
614 __scx_task_iter_maybe_relock(iter);
615
616 list_for_each_entry(pos, cursor, tasks_node) {
617 if (&pos->tasks_node == &scx_tasks)
618 return NULL;
619 if (!(pos->flags & SCX_TASK_CURSOR)) {
620 list_move(cursor, &pos->tasks_node);
621 return container_of(pos, struct task_struct, scx);
622 }
623 }
624
625 /* can't happen, should always terminate at scx_tasks above */
626 BUG();
627 }
628
629 /**
630 * scx_task_iter_next_locked - Next non-idle task with its rq locked
631 * @iter: iterator to walk
632 *
633 * Visit the non-idle task with its rq lock held. Allows callers to specify
634 * whether they would like to filter out dead tasks. See scx_task_iter_start()
635 * for details.
636 */
scx_task_iter_next_locked(struct scx_task_iter * iter)637 static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
638 {
639 struct task_struct *p;
640
641 __scx_task_iter_rq_unlock(iter);
642
643 while ((p = scx_task_iter_next(iter))) {
644 /*
645 * scx_task_iter is used to prepare and move tasks into SCX
646 * while loading the BPF scheduler and vice-versa while
647 * unloading. The init_tasks ("swappers") should be excluded
648 * from the iteration because:
649 *
650 * - It's unsafe to use __setschduler_prio() on an init_task to
651 * determine the sched_class to use as it won't preserve its
652 * idle_sched_class.
653 *
654 * - ops.init/exit_task() can easily be confused if called with
655 * init_tasks as they, e.g., share PID 0.
656 *
657 * As init_tasks are never scheduled through SCX, they can be
658 * skipped safely. Note that is_idle_task() which tests %PF_IDLE
659 * doesn't work here:
660 *
661 * - %PF_IDLE may not be set for an init_task whose CPU hasn't
662 * yet been onlined.
663 *
664 * - %PF_IDLE can be set on tasks that are not init_tasks. See
665 * play_idle_precise() used by CONFIG_IDLE_INJECT.
666 *
667 * Test for idle_sched_class as only init_tasks are on it.
668 */
669 if (p->sched_class != &idle_sched_class)
670 break;
671 }
672 if (!p)
673 return NULL;
674
675 iter->rq = task_rq_lock(p, &iter->rf);
676 iter->locked_task = p;
677
678 return p;
679 }
680
681 /**
682 * scx_add_event - Increase an event counter for 'name' by 'cnt'
683 * @sch: scx_sched to account events for
684 * @name: an event name defined in struct scx_event_stats
685 * @cnt: the number of the event occurred
686 *
687 * This can be used when preemption is not disabled.
688 */
689 #define scx_add_event(sch, name, cnt) do { \
690 this_cpu_add((sch)->pcpu->event_stats.name, (cnt)); \
691 trace_sched_ext_event(#name, (cnt)); \
692 } while(0)
693
694 /**
695 * __scx_add_event - Increase an event counter for 'name' by 'cnt'
696 * @sch: scx_sched to account events for
697 * @name: an event name defined in struct scx_event_stats
698 * @cnt: the number of the event occurred
699 *
700 * This should be used only when preemption is disabled.
701 */
702 #define __scx_add_event(sch, name, cnt) do { \
703 __this_cpu_add((sch)->pcpu->event_stats.name, (cnt)); \
704 trace_sched_ext_event(#name, cnt); \
705 } while(0)
706
707 /**
708 * scx_agg_event - Aggregate an event counter 'kind' from 'src_e' to 'dst_e'
709 * @dst_e: destination event stats
710 * @src_e: source event stats
711 * @kind: a kind of event to be aggregated
712 */
713 #define scx_agg_event(dst_e, src_e, kind) do { \
714 (dst_e)->kind += READ_ONCE((src_e)->kind); \
715 } while(0)
716
717 /**
718 * scx_dump_event - Dump an event 'kind' in 'events' to 's'
719 * @s: output seq_buf
720 * @events: event stats
721 * @kind: a kind of event to dump
722 */
723 #define scx_dump_event(s, events, kind) do { \
724 dump_line(&(s), "%40s: %16lld", #kind, (events)->kind); \
725 } while (0)
726
727
728 static void scx_read_events(struct scx_sched *sch,
729 struct scx_event_stats *events);
730
scx_enable_state(void)731 static enum scx_enable_state scx_enable_state(void)
732 {
733 return atomic_read(&scx_enable_state_var);
734 }
735
scx_set_enable_state(enum scx_enable_state to)736 static enum scx_enable_state scx_set_enable_state(enum scx_enable_state to)
737 {
738 return atomic_xchg(&scx_enable_state_var, to);
739 }
740
scx_tryset_enable_state(enum scx_enable_state to,enum scx_enable_state from)741 static bool scx_tryset_enable_state(enum scx_enable_state to,
742 enum scx_enable_state from)
743 {
744 int from_v = from;
745
746 return atomic_try_cmpxchg(&scx_enable_state_var, &from_v, to);
747 }
748
749 /**
750 * wait_ops_state - Busy-wait the specified ops state to end
751 * @p: target task
752 * @opss: state to wait the end of
753 *
754 * Busy-wait for @p to transition out of @opss. This can only be used when the
755 * state part of @opss is %SCX_QUEUEING or %SCX_DISPATCHING. This function also
756 * has load_acquire semantics to ensure that the caller can see the updates made
757 * in the enqueueing and dispatching paths.
758 */
wait_ops_state(struct task_struct * p,unsigned long opss)759 static void wait_ops_state(struct task_struct *p, unsigned long opss)
760 {
761 do {
762 cpu_relax();
763 } while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
764 }
765
__cpu_valid(s32 cpu)766 static inline bool __cpu_valid(s32 cpu)
767 {
768 return likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu));
769 }
770
771 /**
772 * ops_cpu_valid - Verify a cpu number, to be used on ops input args
773 * @sch: scx_sched to abort on error
774 * @cpu: cpu number which came from a BPF ops
775 * @where: extra information reported on error
776 *
777 * @cpu is a cpu number which came from the BPF scheduler and can be any value.
778 * Verify that it is in range and one of the possible cpus. If invalid, trigger
779 * an ops error.
780 */
ops_cpu_valid(struct scx_sched * sch,s32 cpu,const char * where)781 static bool ops_cpu_valid(struct scx_sched *sch, s32 cpu, const char *where)
782 {
783 if (__cpu_valid(cpu)) {
784 return true;
785 } else {
786 scx_error(sch, "invalid CPU %d%s%s", cpu, where ? " " : "", where ?: "");
787 return false;
788 }
789 }
790
791 /**
792 * ops_sanitize_err - Sanitize a -errno value
793 * @sch: scx_sched to error out on error
794 * @ops_name: operation to blame on failure
795 * @err: -errno value to sanitize
796 *
797 * Verify @err is a valid -errno. If not, trigger scx_error() and return
798 * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
799 * cause misbehaviors. For an example, a large negative return from
800 * ops.init_task() triggers an oops when passed up the call chain because the
801 * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
802 * handled as a pointer.
803 */
ops_sanitize_err(struct scx_sched * sch,const char * ops_name,s32 err)804 static int ops_sanitize_err(struct scx_sched *sch, const char *ops_name, s32 err)
805 {
806 if (err < 0 && err >= -MAX_ERRNO)
807 return err;
808
809 scx_error(sch, "ops.%s() returned an invalid errno %d", ops_name, err);
810 return -EPROTO;
811 }
812
run_deferred(struct rq * rq)813 static void run_deferred(struct rq *rq)
814 {
815 process_ddsp_deferred_locals(rq);
816
817 if (local_read(&rq->scx.reenq_local_deferred)) {
818 local_set(&rq->scx.reenq_local_deferred, 0);
819 reenq_local(rq);
820 }
821 }
822
deferred_bal_cb_workfn(struct rq * rq)823 static void deferred_bal_cb_workfn(struct rq *rq)
824 {
825 run_deferred(rq);
826 }
827
deferred_irq_workfn(struct irq_work * irq_work)828 static void deferred_irq_workfn(struct irq_work *irq_work)
829 {
830 struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
831
832 raw_spin_rq_lock(rq);
833 run_deferred(rq);
834 raw_spin_rq_unlock(rq);
835 }
836
837 /**
838 * schedule_deferred - Schedule execution of deferred actions on an rq
839 * @rq: target rq
840 *
841 * Schedule execution of deferred actions on @rq. Deferred actions are executed
842 * with @rq locked but unpinned, and thus can unlock @rq to e.g. migrate tasks
843 * to other rqs.
844 */
schedule_deferred(struct rq * rq)845 static void schedule_deferred(struct rq *rq)
846 {
847 /*
848 * Queue an irq work. They are executed on IRQ re-enable which may take
849 * a bit longer than the scheduler hook in schedule_deferred_locked().
850 */
851 irq_work_queue(&rq->scx.deferred_irq_work);
852 }
853
854 /**
855 * schedule_deferred_locked - Schedule execution of deferred actions on an rq
856 * @rq: target rq
857 *
858 * Schedule execution of deferred actions on @rq. Equivalent to
859 * schedule_deferred() but requires @rq to be locked and can be more efficient.
860 */
schedule_deferred_locked(struct rq * rq)861 static void schedule_deferred_locked(struct rq *rq)
862 {
863 lockdep_assert_rq_held(rq);
864
865 /*
866 * If in the middle of waking up a task, task_woken_scx() will be called
867 * afterwards which will then run the deferred actions, no need to
868 * schedule anything.
869 */
870 if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
871 return;
872
873 /* Don't do anything if there already is a deferred operation. */
874 if (rq->scx.flags & SCX_RQ_BAL_CB_PENDING)
875 return;
876
877 /*
878 * If in balance, the balance callbacks will be called before rq lock is
879 * released. Schedule one.
880 *
881 *
882 * We can't directly insert the callback into the
883 * rq's list: The call can drop its lock and make the pending balance
884 * callback visible to unrelated code paths that call rq_pin_lock().
885 *
886 * Just let balance_one() know that it must do it itself.
887 */
888 if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
889 rq->scx.flags |= SCX_RQ_BAL_CB_PENDING;
890 return;
891 }
892
893 /*
894 * No scheduler hooks available. Use the generic irq_work path. The
895 * above WAKEUP and BALANCE paths should cover most of the cases and the
896 * time to IRQ re-enable shouldn't be long.
897 */
898 schedule_deferred(rq);
899 }
900
901 /**
902 * touch_core_sched - Update timestamp used for core-sched task ordering
903 * @rq: rq to read clock from, must be locked
904 * @p: task to update the timestamp for
905 *
906 * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
907 * implement global or local-DSQ FIFO ordering for core-sched. Should be called
908 * when a task becomes runnable and its turn on the CPU ends (e.g. slice
909 * exhaustion).
910 */
touch_core_sched(struct rq * rq,struct task_struct * p)911 static void touch_core_sched(struct rq *rq, struct task_struct *p)
912 {
913 lockdep_assert_rq_held(rq);
914
915 #ifdef CONFIG_SCHED_CORE
916 /*
917 * It's okay to update the timestamp spuriously. Use
918 * sched_core_disabled() which is cheaper than enabled().
919 *
920 * As this is used to determine ordering between tasks of sibling CPUs,
921 * it may be better to use per-core dispatch sequence instead.
922 */
923 if (!sched_core_disabled())
924 p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
925 #endif
926 }
927
928 /**
929 * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
930 * @rq: rq to read clock from, must be locked
931 * @p: task being dispatched
932 *
933 * If the BPF scheduler implements custom core-sched ordering via
934 * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
935 * ordering within each local DSQ. This function is called from dispatch paths
936 * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
937 */
touch_core_sched_dispatch(struct rq * rq,struct task_struct * p)938 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
939 {
940 lockdep_assert_rq_held(rq);
941
942 #ifdef CONFIG_SCHED_CORE
943 if (unlikely(SCX_HAS_OP(scx_root, core_sched_before)))
944 touch_core_sched(rq, p);
945 #endif
946 }
947
update_curr_scx(struct rq * rq)948 static void update_curr_scx(struct rq *rq)
949 {
950 struct task_struct *curr = rq->curr;
951 s64 delta_exec;
952
953 delta_exec = update_curr_common(rq);
954 if (unlikely(delta_exec <= 0))
955 return;
956
957 if (curr->scx.slice != SCX_SLICE_INF) {
958 curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
959 if (!curr->scx.slice)
960 touch_core_sched(rq, curr);
961 }
962
963 dl_server_update(&rq->ext_server, delta_exec);
964 }
965
scx_dsq_priq_less(struct rb_node * node_a,const struct rb_node * node_b)966 static bool scx_dsq_priq_less(struct rb_node *node_a,
967 const struct rb_node *node_b)
968 {
969 const struct task_struct *a =
970 container_of(node_a, struct task_struct, scx.dsq_priq);
971 const struct task_struct *b =
972 container_of(node_b, struct task_struct, scx.dsq_priq);
973
974 return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
975 }
976
dsq_mod_nr(struct scx_dispatch_q * dsq,s32 delta)977 static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
978 {
979 /*
980 * scx_bpf_dsq_nr_queued() reads ->nr without locking. Use READ_ONCE()
981 * on the read side and WRITE_ONCE() on the write side to properly
982 * annotate the concurrent lockless access and avoid KCSAN warnings.
983 */
984 WRITE_ONCE(dsq->nr, READ_ONCE(dsq->nr) + delta);
985 }
986
refill_task_slice_dfl(struct scx_sched * sch,struct task_struct * p)987 static void refill_task_slice_dfl(struct scx_sched *sch, struct task_struct *p)
988 {
989 p->scx.slice = READ_ONCE(scx_slice_dfl);
990 __scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1);
991 }
992
local_dsq_post_enq(struct scx_dispatch_q * dsq,struct task_struct * p,u64 enq_flags)993 static void local_dsq_post_enq(struct scx_dispatch_q *dsq, struct task_struct *p,
994 u64 enq_flags)
995 {
996 struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
997 bool preempt = false;
998
999 /*
1000 * If @rq is in balance, the CPU is already vacant and looking for the
1001 * next task to run. No need to preempt or trigger resched after moving
1002 * @p into its local DSQ.
1003 */
1004 if (rq->scx.flags & SCX_RQ_IN_BALANCE)
1005 return;
1006
1007 if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
1008 rq->curr->sched_class == &ext_sched_class) {
1009 rq->curr->scx.slice = 0;
1010 preempt = true;
1011 }
1012
1013 if (preempt || sched_class_above(&ext_sched_class, rq->curr->sched_class))
1014 resched_curr(rq);
1015 }
1016
dispatch_enqueue(struct scx_sched * sch,struct scx_dispatch_q * dsq,struct task_struct * p,u64 enq_flags)1017 static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
1018 struct task_struct *p, u64 enq_flags)
1019 {
1020 bool is_local = dsq->id == SCX_DSQ_LOCAL;
1021
1022 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1023 WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
1024 !RB_EMPTY_NODE(&p->scx.dsq_priq));
1025
1026 if (!is_local) {
1027 raw_spin_lock_nested(&dsq->lock,
1028 (enq_flags & SCX_ENQ_NESTED) ? SINGLE_DEPTH_NESTING : 0);
1029
1030 if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
1031 scx_error(sch, "attempting to dispatch to a destroyed dsq");
1032 /* fall back to the global dsq */
1033 raw_spin_unlock(&dsq->lock);
1034 dsq = find_global_dsq(sch, p);
1035 raw_spin_lock(&dsq->lock);
1036 }
1037 }
1038
1039 if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
1040 (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
1041 /*
1042 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
1043 * their FIFO queues. To avoid confusion and accidentally
1044 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we
1045 * disallow any internal DSQ from doing vtime ordering of
1046 * tasks.
1047 */
1048 scx_error(sch, "cannot use vtime ordering for built-in DSQs");
1049 enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
1050 }
1051
1052 if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
1053 struct rb_node *rbp;
1054
1055 /*
1056 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
1057 * linked to both the rbtree and list on PRIQs, this can only be
1058 * tested easily when adding the first task.
1059 */
1060 if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
1061 nldsq_next_task(dsq, NULL, false)))
1062 scx_error(sch, "DSQ ID 0x%016llx already had FIFO-enqueued tasks",
1063 dsq->id);
1064
1065 p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
1066 rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
1067
1068 /*
1069 * Find the previous task and insert after it on the list so
1070 * that @dsq->list is vtime ordered.
1071 */
1072 rbp = rb_prev(&p->scx.dsq_priq);
1073 if (rbp) {
1074 struct task_struct *prev =
1075 container_of(rbp, struct task_struct,
1076 scx.dsq_priq);
1077 list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
1078 /* first task unchanged - no update needed */
1079 } else {
1080 list_add(&p->scx.dsq_list.node, &dsq->list);
1081 /* not builtin and new task is at head - use fastpath */
1082 rcu_assign_pointer(dsq->first_task, p);
1083 }
1084 } else {
1085 /* a FIFO DSQ shouldn't be using PRIQ enqueuing */
1086 if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
1087 scx_error(sch, "DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
1088 dsq->id);
1089
1090 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) {
1091 list_add(&p->scx.dsq_list.node, &dsq->list);
1092 /* new task inserted at head - use fastpath */
1093 if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN))
1094 rcu_assign_pointer(dsq->first_task, p);
1095 } else {
1096 bool was_empty;
1097
1098 was_empty = list_empty(&dsq->list);
1099 list_add_tail(&p->scx.dsq_list.node, &dsq->list);
1100 if (was_empty && !(dsq->id & SCX_DSQ_FLAG_BUILTIN))
1101 rcu_assign_pointer(dsq->first_task, p);
1102 }
1103 }
1104
1105 /* seq records the order tasks are queued, used by BPF DSQ iterator */
1106 WRITE_ONCE(dsq->seq, dsq->seq + 1);
1107 p->scx.dsq_seq = dsq->seq;
1108
1109 dsq_mod_nr(dsq, 1);
1110 p->scx.dsq = dsq;
1111
1112 /*
1113 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
1114 * direct dispatch path, but we clear them here because the direct
1115 * dispatch verdict may be overridden on the enqueue path during e.g.
1116 * bypass.
1117 */
1118 p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
1119 p->scx.ddsp_enq_flags = 0;
1120
1121 /*
1122 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
1123 * match waiters' load_acquire.
1124 */
1125 if (enq_flags & SCX_ENQ_CLEAR_OPSS)
1126 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1127
1128 if (is_local)
1129 local_dsq_post_enq(dsq, p, enq_flags);
1130 else
1131 raw_spin_unlock(&dsq->lock);
1132 }
1133
task_unlink_from_dsq(struct task_struct * p,struct scx_dispatch_q * dsq)1134 static void task_unlink_from_dsq(struct task_struct *p,
1135 struct scx_dispatch_q *dsq)
1136 {
1137 WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
1138
1139 if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
1140 rb_erase(&p->scx.dsq_priq, &dsq->priq);
1141 RB_CLEAR_NODE(&p->scx.dsq_priq);
1142 p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
1143 }
1144
1145 list_del_init(&p->scx.dsq_list.node);
1146 dsq_mod_nr(dsq, -1);
1147
1148 if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN) && dsq->first_task == p) {
1149 struct task_struct *first_task;
1150
1151 first_task = nldsq_next_task(dsq, NULL, false);
1152 rcu_assign_pointer(dsq->first_task, first_task);
1153 }
1154 }
1155
dispatch_dequeue(struct rq * rq,struct task_struct * p)1156 static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
1157 {
1158 struct scx_dispatch_q *dsq = p->scx.dsq;
1159 bool is_local = dsq == &rq->scx.local_dsq;
1160
1161 lockdep_assert_rq_held(rq);
1162
1163 if (!dsq) {
1164 /*
1165 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
1166 * Unlinking is all that's needed to cancel.
1167 */
1168 if (unlikely(!list_empty(&p->scx.dsq_list.node)))
1169 list_del_init(&p->scx.dsq_list.node);
1170
1171 /*
1172 * When dispatching directly from the BPF scheduler to a local
1173 * DSQ, the task isn't associated with any DSQ but
1174 * @p->scx.holding_cpu may be set under the protection of
1175 * %SCX_OPSS_DISPATCHING.
1176 */
1177 if (p->scx.holding_cpu >= 0)
1178 p->scx.holding_cpu = -1;
1179
1180 return;
1181 }
1182
1183 if (!is_local)
1184 raw_spin_lock(&dsq->lock);
1185
1186 /*
1187 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
1188 * change underneath us.
1189 */
1190 if (p->scx.holding_cpu < 0) {
1191 /* @p must still be on @dsq, dequeue */
1192 task_unlink_from_dsq(p, dsq);
1193 } else {
1194 /*
1195 * We're racing against dispatch_to_local_dsq() which already
1196 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
1197 * holding_cpu which tells dispatch_to_local_dsq() that it lost
1198 * the race.
1199 */
1200 WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
1201 p->scx.holding_cpu = -1;
1202 }
1203 p->scx.dsq = NULL;
1204
1205 if (!is_local)
1206 raw_spin_unlock(&dsq->lock);
1207 }
1208
1209 /*
1210 * Abbreviated version of dispatch_dequeue() that can be used when both @p's rq
1211 * and dsq are locked.
1212 */
dispatch_dequeue_locked(struct task_struct * p,struct scx_dispatch_q * dsq)1213 static void dispatch_dequeue_locked(struct task_struct *p,
1214 struct scx_dispatch_q *dsq)
1215 {
1216 lockdep_assert_rq_held(task_rq(p));
1217 lockdep_assert_held(&dsq->lock);
1218
1219 task_unlink_from_dsq(p, dsq);
1220 p->scx.dsq = NULL;
1221 }
1222
find_dsq_for_dispatch(struct scx_sched * sch,struct rq * rq,u64 dsq_id,struct task_struct * p)1223 static struct scx_dispatch_q *find_dsq_for_dispatch(struct scx_sched *sch,
1224 struct rq *rq, u64 dsq_id,
1225 struct task_struct *p)
1226 {
1227 struct scx_dispatch_q *dsq;
1228
1229 if (dsq_id == SCX_DSQ_LOCAL)
1230 return &rq->scx.local_dsq;
1231
1232 if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
1233 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
1234
1235 if (!ops_cpu_valid(sch, cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
1236 return find_global_dsq(sch, p);
1237
1238 return &cpu_rq(cpu)->scx.local_dsq;
1239 }
1240
1241 if (dsq_id == SCX_DSQ_GLOBAL)
1242 dsq = find_global_dsq(sch, p);
1243 else
1244 dsq = find_user_dsq(sch, dsq_id);
1245
1246 if (unlikely(!dsq)) {
1247 scx_error(sch, "non-existent DSQ 0x%llx for %s[%d]",
1248 dsq_id, p->comm, p->pid);
1249 return find_global_dsq(sch, p);
1250 }
1251
1252 return dsq;
1253 }
1254
mark_direct_dispatch(struct scx_sched * sch,struct task_struct * ddsp_task,struct task_struct * p,u64 dsq_id,u64 enq_flags)1255 static void mark_direct_dispatch(struct scx_sched *sch,
1256 struct task_struct *ddsp_task,
1257 struct task_struct *p, u64 dsq_id,
1258 u64 enq_flags)
1259 {
1260 /*
1261 * Mark that dispatch already happened from ops.select_cpu() or
1262 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
1263 * which can never match a valid task pointer.
1264 */
1265 __this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));
1266
1267 /* @p must match the task on the enqueue path */
1268 if (unlikely(p != ddsp_task)) {
1269 if (IS_ERR(ddsp_task))
1270 scx_error(sch, "%s[%d] already direct-dispatched",
1271 p->comm, p->pid);
1272 else
1273 scx_error(sch, "scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
1274 ddsp_task->comm, ddsp_task->pid,
1275 p->comm, p->pid);
1276 return;
1277 }
1278
1279 WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
1280 WARN_ON_ONCE(p->scx.ddsp_enq_flags);
1281
1282 p->scx.ddsp_dsq_id = dsq_id;
1283 p->scx.ddsp_enq_flags = enq_flags;
1284 }
1285
direct_dispatch(struct scx_sched * sch,struct task_struct * p,u64 enq_flags)1286 static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
1287 u64 enq_flags)
1288 {
1289 struct rq *rq = task_rq(p);
1290 struct scx_dispatch_q *dsq =
1291 find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
1292
1293 touch_core_sched_dispatch(rq, p);
1294
1295 p->scx.ddsp_enq_flags |= enq_flags;
1296
1297 /*
1298 * We are in the enqueue path with @rq locked and pinned, and thus can't
1299 * double lock a remote rq and enqueue to its local DSQ. For
1300 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
1301 * the enqueue so that it's executed when @rq can be unlocked.
1302 */
1303 if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
1304 unsigned long opss;
1305
1306 opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;
1307
1308 switch (opss & SCX_OPSS_STATE_MASK) {
1309 case SCX_OPSS_NONE:
1310 break;
1311 case SCX_OPSS_QUEUEING:
1312 /*
1313 * As @p was never passed to the BPF side, _release is
1314 * not strictly necessary. Still do it for consistency.
1315 */
1316 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1317 break;
1318 default:
1319 WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()",
1320 p->comm, p->pid, opss);
1321 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1322 break;
1323 }
1324
1325 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
1326 list_add_tail(&p->scx.dsq_list.node,
1327 &rq->scx.ddsp_deferred_locals);
1328 schedule_deferred_locked(rq);
1329 return;
1330 }
1331
1332 dispatch_enqueue(sch, dsq, p,
1333 p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
1334 }
1335
scx_rq_online(struct rq * rq)1336 static bool scx_rq_online(struct rq *rq)
1337 {
1338 /*
1339 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates
1340 * the online state as seen from the BPF scheduler. cpu_active() test
1341 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will
1342 * stay set until the current scheduling operation is complete even if
1343 * we aren't locking @rq.
1344 */
1345 return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
1346 }
1347
do_enqueue_task(struct rq * rq,struct task_struct * p,u64 enq_flags,int sticky_cpu)1348 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
1349 int sticky_cpu)
1350 {
1351 struct scx_sched *sch = scx_root;
1352 struct task_struct **ddsp_taskp;
1353 struct scx_dispatch_q *dsq;
1354 unsigned long qseq;
1355
1356 WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
1357
1358 /* rq migration */
1359 if (sticky_cpu == cpu_of(rq))
1360 goto local_norefill;
1361
1362 /*
1363 * If !scx_rq_online(), we already told the BPF scheduler that the CPU
1364 * is offline and are just running the hotplug path. Don't bother the
1365 * BPF scheduler.
1366 */
1367 if (!scx_rq_online(rq))
1368 goto local;
1369
1370 if (scx_rq_bypassing(rq)) {
1371 __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);
1372 goto bypass;
1373 }
1374
1375 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
1376 goto direct;
1377
1378 /* see %SCX_OPS_ENQ_EXITING */
1379 if (!(sch->ops.flags & SCX_OPS_ENQ_EXITING) &&
1380 unlikely(p->flags & PF_EXITING)) {
1381 __scx_add_event(sch, SCX_EV_ENQ_SKIP_EXITING, 1);
1382 goto local;
1383 }
1384
1385 /* see %SCX_OPS_ENQ_MIGRATION_DISABLED */
1386 if (!(sch->ops.flags & SCX_OPS_ENQ_MIGRATION_DISABLED) &&
1387 is_migration_disabled(p)) {
1388 __scx_add_event(sch, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED, 1);
1389 goto local;
1390 }
1391
1392 if (unlikely(!SCX_HAS_OP(sch, enqueue)))
1393 goto global;
1394
1395 /* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
1396 qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
1397
1398 WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
1399 atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);
1400
1401 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
1402 WARN_ON_ONCE(*ddsp_taskp);
1403 *ddsp_taskp = p;
1404
1405 SCX_CALL_OP_TASK(sch, SCX_KF_ENQUEUE, enqueue, rq, p, enq_flags);
1406
1407 *ddsp_taskp = NULL;
1408 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
1409 goto direct;
1410
1411 /*
1412 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
1413 * dequeue may be waiting. The store_release matches their load_acquire.
1414 */
1415 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
1416 return;
1417
1418 direct:
1419 direct_dispatch(sch, p, enq_flags);
1420 return;
1421 local_norefill:
1422 dispatch_enqueue(sch, &rq->scx.local_dsq, p, enq_flags);
1423 return;
1424 local:
1425 dsq = &rq->scx.local_dsq;
1426 goto enqueue;
1427 global:
1428 dsq = find_global_dsq(sch, p);
1429 goto enqueue;
1430 bypass:
1431 dsq = &task_rq(p)->scx.bypass_dsq;
1432 goto enqueue;
1433
1434 enqueue:
1435 /*
1436 * For task-ordering, slice refill must be treated as implying the end
1437 * of the current slice. Otherwise, the longer @p stays on the CPU, the
1438 * higher priority it becomes from scx_prio_less()'s POV.
1439 */
1440 touch_core_sched(rq, p);
1441 refill_task_slice_dfl(sch, p);
1442 dispatch_enqueue(sch, dsq, p, enq_flags);
1443 }
1444
task_runnable(const struct task_struct * p)1445 static bool task_runnable(const struct task_struct *p)
1446 {
1447 return !list_empty(&p->scx.runnable_node);
1448 }
1449
set_task_runnable(struct rq * rq,struct task_struct * p)1450 static void set_task_runnable(struct rq *rq, struct task_struct *p)
1451 {
1452 lockdep_assert_rq_held(rq);
1453
1454 if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
1455 p->scx.runnable_at = jiffies;
1456 p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
1457 }
1458
1459 /*
1460 * list_add_tail() must be used. scx_bypass() depends on tasks being
1461 * appended to the runnable_list.
1462 */
1463 list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
1464 }
1465
clr_task_runnable(struct task_struct * p,bool reset_runnable_at)1466 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
1467 {
1468 list_del_init(&p->scx.runnable_node);
1469 if (reset_runnable_at)
1470 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
1471 }
1472
enqueue_task_scx(struct rq * rq,struct task_struct * p,int core_enq_flags)1473 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int core_enq_flags)
1474 {
1475 struct scx_sched *sch = scx_root;
1476 int sticky_cpu = p->scx.sticky_cpu;
1477 u64 enq_flags = core_enq_flags | rq->scx.extra_enq_flags;
1478
1479 if (enq_flags & ENQUEUE_WAKEUP)
1480 rq->scx.flags |= SCX_RQ_IN_WAKEUP;
1481
1482 if (sticky_cpu >= 0)
1483 p->scx.sticky_cpu = -1;
1484
1485 /*
1486 * Restoring a running task will be immediately followed by
1487 * set_next_task_scx() which expects the task to not be on the BPF
1488 * scheduler as tasks can only start running through local DSQs. Force
1489 * direct-dispatch into the local DSQ by setting the sticky_cpu.
1490 */
1491 if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
1492 sticky_cpu = cpu_of(rq);
1493
1494 if (p->scx.flags & SCX_TASK_QUEUED) {
1495 WARN_ON_ONCE(!task_runnable(p));
1496 goto out;
1497 }
1498
1499 set_task_runnable(rq, p);
1500 p->scx.flags |= SCX_TASK_QUEUED;
1501 rq->scx.nr_running++;
1502 add_nr_running(rq, 1);
1503
1504 if (SCX_HAS_OP(sch, runnable) && !task_on_rq_migrating(p))
1505 SCX_CALL_OP_TASK(sch, SCX_KF_REST, runnable, rq, p, enq_flags);
1506
1507 if (enq_flags & SCX_ENQ_WAKEUP)
1508 touch_core_sched(rq, p);
1509
1510 /* Start dl_server if this is the first task being enqueued */
1511 if (rq->scx.nr_running == 1)
1512 dl_server_start(&rq->ext_server);
1513
1514 do_enqueue_task(rq, p, enq_flags, sticky_cpu);
1515 out:
1516 rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
1517
1518 if ((enq_flags & SCX_ENQ_CPU_SELECTED) &&
1519 unlikely(cpu_of(rq) != p->scx.selected_cpu))
1520 __scx_add_event(sch, SCX_EV_SELECT_CPU_FALLBACK, 1);
1521 }
1522
ops_dequeue(struct rq * rq,struct task_struct * p,u64 deq_flags)1523 static void ops_dequeue(struct rq *rq, struct task_struct *p, u64 deq_flags)
1524 {
1525 struct scx_sched *sch = scx_root;
1526 unsigned long opss;
1527
1528 /* dequeue is always temporary, don't reset runnable_at */
1529 clr_task_runnable(p, false);
1530
1531 /* acquire ensures that we see the preceding updates on QUEUED */
1532 opss = atomic_long_read_acquire(&p->scx.ops_state);
1533
1534 switch (opss & SCX_OPSS_STATE_MASK) {
1535 case SCX_OPSS_NONE:
1536 break;
1537 case SCX_OPSS_QUEUEING:
1538 /*
1539 * QUEUEING is started and finished while holding @p's rq lock.
1540 * As we're holding the rq lock now, we shouldn't see QUEUEING.
1541 */
1542 BUG();
1543 case SCX_OPSS_QUEUED:
1544 if (SCX_HAS_OP(sch, dequeue))
1545 SCX_CALL_OP_TASK(sch, SCX_KF_REST, dequeue, rq,
1546 p, deq_flags);
1547
1548 if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
1549 SCX_OPSS_NONE))
1550 break;
1551 fallthrough;
1552 case SCX_OPSS_DISPATCHING:
1553 /*
1554 * If @p is being dispatched from the BPF scheduler to a DSQ,
1555 * wait for the transfer to complete so that @p doesn't get
1556 * added to its DSQ after dequeueing is complete.
1557 *
1558 * As we're waiting on DISPATCHING with the rq locked, the
1559 * dispatching side shouldn't try to lock the rq while
1560 * DISPATCHING is set. See dispatch_to_local_dsq().
1561 *
1562 * DISPATCHING shouldn't have qseq set and control can reach
1563 * here with NONE @opss from the above QUEUED case block.
1564 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
1565 */
1566 wait_ops_state(p, SCX_OPSS_DISPATCHING);
1567 BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
1568 break;
1569 }
1570 }
1571
dequeue_task_scx(struct rq * rq,struct task_struct * p,int deq_flags)1572 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags)
1573 {
1574 struct scx_sched *sch = scx_root;
1575
1576 if (!(p->scx.flags & SCX_TASK_QUEUED)) {
1577 WARN_ON_ONCE(task_runnable(p));
1578 return true;
1579 }
1580
1581 ops_dequeue(rq, p, deq_flags);
1582
1583 /*
1584 * A currently running task which is going off @rq first gets dequeued
1585 * and then stops running. As we want running <-> stopping transitions
1586 * to be contained within runnable <-> quiescent transitions, trigger
1587 * ->stopping() early here instead of in put_prev_task_scx().
1588 *
1589 * @p may go through multiple stopping <-> running transitions between
1590 * here and put_prev_task_scx() if task attribute changes occur while
1591 * balance_one() leaves @rq unlocked. However, they don't contain any
1592 * information meaningful to the BPF scheduler and can be suppressed by
1593 * skipping the callbacks if the task is !QUEUED.
1594 */
1595 if (SCX_HAS_OP(sch, stopping) && task_current(rq, p)) {
1596 update_curr_scx(rq);
1597 SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, false);
1598 }
1599
1600 if (SCX_HAS_OP(sch, quiescent) && !task_on_rq_migrating(p))
1601 SCX_CALL_OP_TASK(sch, SCX_KF_REST, quiescent, rq, p, deq_flags);
1602
1603 if (deq_flags & SCX_DEQ_SLEEP)
1604 p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
1605 else
1606 p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;
1607
1608 p->scx.flags &= ~SCX_TASK_QUEUED;
1609 rq->scx.nr_running--;
1610 sub_nr_running(rq, 1);
1611
1612 dispatch_dequeue(rq, p);
1613 return true;
1614 }
1615
yield_task_scx(struct rq * rq)1616 static void yield_task_scx(struct rq *rq)
1617 {
1618 struct scx_sched *sch = scx_root;
1619 struct task_struct *p = rq->donor;
1620
1621 if (SCX_HAS_OP(sch, yield))
1622 SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq, p, NULL);
1623 else
1624 p->scx.slice = 0;
1625 }
1626
yield_to_task_scx(struct rq * rq,struct task_struct * to)1627 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
1628 {
1629 struct scx_sched *sch = scx_root;
1630 struct task_struct *from = rq->donor;
1631
1632 if (SCX_HAS_OP(sch, yield))
1633 return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq,
1634 from, to);
1635 else
1636 return false;
1637 }
1638
move_local_task_to_local_dsq(struct task_struct * p,u64 enq_flags,struct scx_dispatch_q * src_dsq,struct rq * dst_rq)1639 static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
1640 struct scx_dispatch_q *src_dsq,
1641 struct rq *dst_rq)
1642 {
1643 struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;
1644
1645 /* @dsq is locked and @p is on @dst_rq */
1646 lockdep_assert_held(&src_dsq->lock);
1647 lockdep_assert_rq_held(dst_rq);
1648
1649 WARN_ON_ONCE(p->scx.holding_cpu >= 0);
1650
1651 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
1652 list_add(&p->scx.dsq_list.node, &dst_dsq->list);
1653 else
1654 list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);
1655
1656 dsq_mod_nr(dst_dsq, 1);
1657 p->scx.dsq = dst_dsq;
1658
1659 local_dsq_post_enq(dst_dsq, p, enq_flags);
1660 }
1661
1662 /**
1663 * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
1664 * @p: task to move
1665 * @enq_flags: %SCX_ENQ_*
1666 * @src_rq: rq to move the task from, locked on entry, released on return
1667 * @dst_rq: rq to move the task into, locked on return
1668 *
1669 * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
1670 */
move_remote_task_to_local_dsq(struct task_struct * p,u64 enq_flags,struct rq * src_rq,struct rq * dst_rq)1671 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
1672 struct rq *src_rq, struct rq *dst_rq)
1673 {
1674 lockdep_assert_rq_held(src_rq);
1675
1676 /* the following marks @p MIGRATING which excludes dequeue */
1677 deactivate_task(src_rq, p, 0);
1678 set_task_cpu(p, cpu_of(dst_rq));
1679 p->scx.sticky_cpu = cpu_of(dst_rq);
1680
1681 raw_spin_rq_unlock(src_rq);
1682 raw_spin_rq_lock(dst_rq);
1683
1684 /*
1685 * We want to pass scx-specific enq_flags but activate_task() will
1686 * truncate the upper 32 bit. As we own @rq, we can pass them through
1687 * @rq->scx.extra_enq_flags instead.
1688 */
1689 WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
1690 WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
1691 dst_rq->scx.extra_enq_flags = enq_flags;
1692 activate_task(dst_rq, p, 0);
1693 dst_rq->scx.extra_enq_flags = 0;
1694 }
1695
1696 /*
1697 * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
1698 * differences:
1699 *
1700 * - is_cpu_allowed() asks "Can this task run on this CPU?" while
1701 * task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
1702 * this CPU?".
1703 *
1704 * While migration is disabled, is_cpu_allowed() has to say "yes" as the task
1705 * must be allowed to finish on the CPU that it's currently on regardless of
1706 * the CPU state. However, task_can_run_on_remote_rq() must say "no" as the
1707 * BPF scheduler shouldn't attempt to migrate a task which has migration
1708 * disabled.
1709 *
1710 * - The BPF scheduler is bypassed while the rq is offline and we can always say
1711 * no to the BPF scheduler initiated migrations while offline.
1712 *
1713 * The caller must ensure that @p and @rq are on different CPUs.
1714 */
task_can_run_on_remote_rq(struct scx_sched * sch,struct task_struct * p,struct rq * rq,bool enforce)1715 static bool task_can_run_on_remote_rq(struct scx_sched *sch,
1716 struct task_struct *p, struct rq *rq,
1717 bool enforce)
1718 {
1719 int cpu = cpu_of(rq);
1720
1721 WARN_ON_ONCE(task_cpu(p) == cpu);
1722
1723 /*
1724 * If @p has migration disabled, @p->cpus_ptr is updated to contain only
1725 * the pinned CPU in migrate_disable_switch() while @p is being switched
1726 * out. However, put_prev_task_scx() is called before @p->cpus_ptr is
1727 * updated and thus another CPU may see @p on a DSQ inbetween leading to
1728 * @p passing the below task_allowed_on_cpu() check while migration is
1729 * disabled.
1730 *
1731 * Test the migration disabled state first as the race window is narrow
1732 * and the BPF scheduler failing to check migration disabled state can
1733 * easily be masked if task_allowed_on_cpu() is done first.
1734 */
1735 if (unlikely(is_migration_disabled(p))) {
1736 if (enforce)
1737 scx_error(sch, "SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d",
1738 p->comm, p->pid, task_cpu(p), cpu);
1739 return false;
1740 }
1741
1742 /*
1743 * We don't require the BPF scheduler to avoid dispatching to offline
1744 * CPUs mostly for convenience but also because CPUs can go offline
1745 * between scx_bpf_dsq_insert() calls and here. Trigger error iff the
1746 * picked CPU is outside the allowed mask.
1747 */
1748 if (!task_allowed_on_cpu(p, cpu)) {
1749 if (enforce)
1750 scx_error(sch, "SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]",
1751 cpu, p->comm, p->pid);
1752 return false;
1753 }
1754
1755 if (!scx_rq_online(rq)) {
1756 if (enforce)
1757 __scx_add_event(sch, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE, 1);
1758 return false;
1759 }
1760
1761 return true;
1762 }
1763
1764 /**
1765 * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
1766 * @p: target task
1767 * @dsq: locked DSQ @p is currently on
1768 * @src_rq: rq @p is currently on, stable with @dsq locked
1769 *
1770 * Called with @dsq locked but no rq's locked. We want to move @p to a different
1771 * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is
1772 * required when transferring into a local DSQ. Even when transferring into a
1773 * non-local DSQ, it's better to use the same mechanism to protect against
1774 * dequeues and maintain the invariant that @p->scx.dsq can only change while
1775 * @src_rq is locked, which e.g. scx_dump_task() depends on.
1776 *
1777 * We want to grab @src_rq but that can deadlock if we try while locking @dsq,
1778 * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As
1779 * this may race with dequeue, which can't drop the rq lock or fail, do a little
1780 * dancing from our side.
1781 *
1782 * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
1783 * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu
1784 * would be cleared to -1. While other cpus may have updated it to different
1785 * values afterwards, as this operation can't be preempted or recurse, the
1786 * holding_cpu can never become this CPU again before we're done. Thus, we can
1787 * tell whether we lost to dequeue by testing whether the holding_cpu still
1788 * points to this CPU. See dispatch_dequeue() for the counterpart.
1789 *
1790 * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is
1791 * still valid. %false if lost to dequeue.
1792 */
unlink_dsq_and_lock_src_rq(struct task_struct * p,struct scx_dispatch_q * dsq,struct rq * src_rq)1793 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p,
1794 struct scx_dispatch_q *dsq,
1795 struct rq *src_rq)
1796 {
1797 s32 cpu = raw_smp_processor_id();
1798
1799 lockdep_assert_held(&dsq->lock);
1800
1801 WARN_ON_ONCE(p->scx.holding_cpu >= 0);
1802 task_unlink_from_dsq(p, dsq);
1803 p->scx.holding_cpu = cpu;
1804
1805 raw_spin_unlock(&dsq->lock);
1806 raw_spin_rq_lock(src_rq);
1807
1808 /* task_rq couldn't have changed if we're still the holding cpu */
1809 return likely(p->scx.holding_cpu == cpu) &&
1810 !WARN_ON_ONCE(src_rq != task_rq(p));
1811 }
1812
consume_remote_task(struct rq * this_rq,struct task_struct * p,struct scx_dispatch_q * dsq,struct rq * src_rq)1813 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
1814 struct scx_dispatch_q *dsq, struct rq *src_rq)
1815 {
1816 raw_spin_rq_unlock(this_rq);
1817
1818 if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
1819 move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
1820 return true;
1821 } else {
1822 raw_spin_rq_unlock(src_rq);
1823 raw_spin_rq_lock(this_rq);
1824 return false;
1825 }
1826 }
1827
1828 /**
1829 * move_task_between_dsqs() - Move a task from one DSQ to another
1830 * @sch: scx_sched being operated on
1831 * @p: target task
1832 * @enq_flags: %SCX_ENQ_*
1833 * @src_dsq: DSQ @p is currently on, must not be a local DSQ
1834 * @dst_dsq: DSQ @p is being moved to, can be any DSQ
1835 *
1836 * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local
1837 * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq
1838 * will change. As @p's task_rq is locked, this function doesn't need to use the
1839 * holding_cpu mechanism.
1840 *
1841 * On return, @src_dsq is unlocked and only @p's new task_rq, which is the
1842 * return value, is locked.
1843 */
move_task_between_dsqs(struct scx_sched * sch,struct task_struct * p,u64 enq_flags,struct scx_dispatch_q * src_dsq,struct scx_dispatch_q * dst_dsq)1844 static struct rq *move_task_between_dsqs(struct scx_sched *sch,
1845 struct task_struct *p, u64 enq_flags,
1846 struct scx_dispatch_q *src_dsq,
1847 struct scx_dispatch_q *dst_dsq)
1848 {
1849 struct rq *src_rq = task_rq(p), *dst_rq;
1850
1851 BUG_ON(src_dsq->id == SCX_DSQ_LOCAL);
1852 lockdep_assert_held(&src_dsq->lock);
1853 lockdep_assert_rq_held(src_rq);
1854
1855 if (dst_dsq->id == SCX_DSQ_LOCAL) {
1856 dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
1857 if (src_rq != dst_rq &&
1858 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
1859 dst_dsq = find_global_dsq(sch, p);
1860 dst_rq = src_rq;
1861 }
1862 } else {
1863 /* no need to migrate if destination is a non-local DSQ */
1864 dst_rq = src_rq;
1865 }
1866
1867 /*
1868 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
1869 * CPU, @p will be migrated.
1870 */
1871 if (dst_dsq->id == SCX_DSQ_LOCAL) {
1872 /* @p is going from a non-local DSQ to a local DSQ */
1873 if (src_rq == dst_rq) {
1874 task_unlink_from_dsq(p, src_dsq);
1875 move_local_task_to_local_dsq(p, enq_flags,
1876 src_dsq, dst_rq);
1877 raw_spin_unlock(&src_dsq->lock);
1878 } else {
1879 raw_spin_unlock(&src_dsq->lock);
1880 move_remote_task_to_local_dsq(p, enq_flags,
1881 src_rq, dst_rq);
1882 }
1883 } else {
1884 /*
1885 * @p is going from a non-local DSQ to a non-local DSQ. As
1886 * $src_dsq is already locked, do an abbreviated dequeue.
1887 */
1888 dispatch_dequeue_locked(p, src_dsq);
1889 raw_spin_unlock(&src_dsq->lock);
1890
1891 dispatch_enqueue(sch, dst_dsq, p, enq_flags);
1892 }
1893
1894 return dst_rq;
1895 }
1896
consume_dispatch_q(struct scx_sched * sch,struct rq * rq,struct scx_dispatch_q * dsq)1897 static bool consume_dispatch_q(struct scx_sched *sch, struct rq *rq,
1898 struct scx_dispatch_q *dsq)
1899 {
1900 struct task_struct *p;
1901 retry:
1902 /*
1903 * The caller can't expect to successfully consume a task if the task's
1904 * addition to @dsq isn't guaranteed to be visible somehow. Test
1905 * @dsq->list without locking and skip if it seems empty.
1906 */
1907 if (list_empty(&dsq->list))
1908 return false;
1909
1910 raw_spin_lock(&dsq->lock);
1911
1912 nldsq_for_each_task(p, dsq) {
1913 struct rq *task_rq = task_rq(p);
1914
1915 /*
1916 * This loop can lead to multiple lockup scenarios, e.g. the BPF
1917 * scheduler can put an enormous number of affinitized tasks into
1918 * a contended DSQ, or the outer retry loop can repeatedly race
1919 * against scx_bypass() dequeueing tasks from @dsq trying to put
1920 * the system into the bypass mode. This can easily live-lock the
1921 * machine. If aborting, exit from all non-bypass DSQs.
1922 */
1923 if (unlikely(READ_ONCE(scx_aborting)) && dsq->id != SCX_DSQ_BYPASS)
1924 break;
1925
1926 if (rq == task_rq) {
1927 task_unlink_from_dsq(p, dsq);
1928 move_local_task_to_local_dsq(p, 0, dsq, rq);
1929 raw_spin_unlock(&dsq->lock);
1930 return true;
1931 }
1932
1933 if (task_can_run_on_remote_rq(sch, p, rq, false)) {
1934 if (likely(consume_remote_task(rq, p, dsq, task_rq)))
1935 return true;
1936 goto retry;
1937 }
1938 }
1939
1940 raw_spin_unlock(&dsq->lock);
1941 return false;
1942 }
1943
consume_global_dsq(struct scx_sched * sch,struct rq * rq)1944 static bool consume_global_dsq(struct scx_sched *sch, struct rq *rq)
1945 {
1946 int node = cpu_to_node(cpu_of(rq));
1947
1948 return consume_dispatch_q(sch, rq, sch->global_dsqs[node]);
1949 }
1950
1951 /**
1952 * dispatch_to_local_dsq - Dispatch a task to a local dsq
1953 * @sch: scx_sched being operated on
1954 * @rq: current rq which is locked
1955 * @dst_dsq: destination DSQ
1956 * @p: task to dispatch
1957 * @enq_flags: %SCX_ENQ_*
1958 *
1959 * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
1960 * DSQ. This function performs all the synchronization dancing needed because
1961 * local DSQs are protected with rq locks.
1962 *
1963 * The caller must have exclusive ownership of @p (e.g. through
1964 * %SCX_OPSS_DISPATCHING).
1965 */
dispatch_to_local_dsq(struct scx_sched * sch,struct rq * rq,struct scx_dispatch_q * dst_dsq,struct task_struct * p,u64 enq_flags)1966 static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
1967 struct scx_dispatch_q *dst_dsq,
1968 struct task_struct *p, u64 enq_flags)
1969 {
1970 struct rq *src_rq = task_rq(p);
1971 struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
1972 struct rq *locked_rq = rq;
1973
1974 /*
1975 * We're synchronized against dequeue through DISPATCHING. As @p can't
1976 * be dequeued, its task_rq and cpus_allowed are stable too.
1977 *
1978 * If dispatching to @rq that @p is already on, no lock dancing needed.
1979 */
1980 if (rq == src_rq && rq == dst_rq) {
1981 dispatch_enqueue(sch, dst_dsq, p,
1982 enq_flags | SCX_ENQ_CLEAR_OPSS);
1983 return;
1984 }
1985
1986 if (src_rq != dst_rq &&
1987 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
1988 dispatch_enqueue(sch, find_global_dsq(sch, p), p,
1989 enq_flags | SCX_ENQ_CLEAR_OPSS);
1990 return;
1991 }
1992
1993 /*
1994 * @p is on a possibly remote @src_rq which we need to lock to move the
1995 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
1996 * on DISPATCHING, so we can't grab @src_rq lock while holding
1997 * DISPATCHING.
1998 *
1999 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
2000 * we're moving from a DSQ and use the same mechanism - mark the task
2001 * under transfer with holding_cpu, release DISPATCHING and then follow
2002 * the same protocol. See unlink_dsq_and_lock_src_rq().
2003 */
2004 p->scx.holding_cpu = raw_smp_processor_id();
2005
2006 /* store_release ensures that dequeue sees the above */
2007 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
2008
2009 /* switch to @src_rq lock */
2010 if (locked_rq != src_rq) {
2011 raw_spin_rq_unlock(locked_rq);
2012 locked_rq = src_rq;
2013 raw_spin_rq_lock(src_rq);
2014 }
2015
2016 /* task_rq couldn't have changed if we're still the holding cpu */
2017 if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
2018 !WARN_ON_ONCE(src_rq != task_rq(p))) {
2019 /*
2020 * If @p is staying on the same rq, there's no need to go
2021 * through the full deactivate/activate cycle. Optimize by
2022 * abbreviating move_remote_task_to_local_dsq().
2023 */
2024 if (src_rq == dst_rq) {
2025 p->scx.holding_cpu = -1;
2026 dispatch_enqueue(sch, &dst_rq->scx.local_dsq, p,
2027 enq_flags);
2028 } else {
2029 move_remote_task_to_local_dsq(p, enq_flags,
2030 src_rq, dst_rq);
2031 /* task has been moved to dst_rq, which is now locked */
2032 locked_rq = dst_rq;
2033 }
2034
2035 /* if the destination CPU is idle, wake it up */
2036 if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
2037 resched_curr(dst_rq);
2038 }
2039
2040 /* switch back to @rq lock */
2041 if (locked_rq != rq) {
2042 raw_spin_rq_unlock(locked_rq);
2043 raw_spin_rq_lock(rq);
2044 }
2045 }
2046
2047 /**
2048 * finish_dispatch - Asynchronously finish dispatching a task
2049 * @rq: current rq which is locked
2050 * @p: task to finish dispatching
2051 * @qseq_at_dispatch: qseq when @p started getting dispatched
2052 * @dsq_id: destination DSQ ID
2053 * @enq_flags: %SCX_ENQ_*
2054 *
2055 * Dispatching to local DSQs may need to wait for queueing to complete or
2056 * require rq lock dancing. As we don't wanna do either while inside
2057 * ops.dispatch() to avoid locking order inversion, we split dispatching into
2058 * two parts. scx_bpf_dsq_insert() which is called by ops.dispatch() records the
2059 * task and its qseq. Once ops.dispatch() returns, this function is called to
2060 * finish up.
2061 *
2062 * There is no guarantee that @p is still valid for dispatching or even that it
2063 * was valid in the first place. Make sure that the task is still owned by the
2064 * BPF scheduler and claim the ownership before dispatching.
2065 */
finish_dispatch(struct scx_sched * sch,struct rq * rq,struct task_struct * p,unsigned long qseq_at_dispatch,u64 dsq_id,u64 enq_flags)2066 static void finish_dispatch(struct scx_sched *sch, struct rq *rq,
2067 struct task_struct *p,
2068 unsigned long qseq_at_dispatch,
2069 u64 dsq_id, u64 enq_flags)
2070 {
2071 struct scx_dispatch_q *dsq;
2072 unsigned long opss;
2073
2074 touch_core_sched_dispatch(rq, p);
2075 retry:
2076 /*
2077 * No need for _acquire here. @p is accessed only after a successful
2078 * try_cmpxchg to DISPATCHING.
2079 */
2080 opss = atomic_long_read(&p->scx.ops_state);
2081
2082 switch (opss & SCX_OPSS_STATE_MASK) {
2083 case SCX_OPSS_DISPATCHING:
2084 case SCX_OPSS_NONE:
2085 /* someone else already got to it */
2086 return;
2087 case SCX_OPSS_QUEUED:
2088 /*
2089 * If qseq doesn't match, @p has gone through at least one
2090 * dispatch/dequeue and re-enqueue cycle between
2091 * scx_bpf_dsq_insert() and here and we have no claim on it.
2092 */
2093 if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
2094 return;
2095
2096 /*
2097 * While we know @p is accessible, we don't yet have a claim on
2098 * it - the BPF scheduler is allowed to dispatch tasks
2099 * spuriously and there can be a racing dequeue attempt. Let's
2100 * claim @p by atomically transitioning it from QUEUED to
2101 * DISPATCHING.
2102 */
2103 if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
2104 SCX_OPSS_DISPATCHING)))
2105 break;
2106 goto retry;
2107 case SCX_OPSS_QUEUEING:
2108 /*
2109 * do_enqueue_task() is in the process of transferring the task
2110 * to the BPF scheduler while holding @p's rq lock. As we aren't
2111 * holding any kernel or BPF resource that the enqueue path may
2112 * depend upon, it's safe to wait.
2113 */
2114 wait_ops_state(p, opss);
2115 goto retry;
2116 }
2117
2118 BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));
2119
2120 dsq = find_dsq_for_dispatch(sch, this_rq(), dsq_id, p);
2121
2122 if (dsq->id == SCX_DSQ_LOCAL)
2123 dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags);
2124 else
2125 dispatch_enqueue(sch, dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
2126 }
2127
flush_dispatch_buf(struct scx_sched * sch,struct rq * rq)2128 static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq)
2129 {
2130 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2131 u32 u;
2132
2133 for (u = 0; u < dspc->cursor; u++) {
2134 struct scx_dsp_buf_ent *ent = &dspc->buf[u];
2135
2136 finish_dispatch(sch, rq, ent->task, ent->qseq, ent->dsq_id,
2137 ent->enq_flags);
2138 }
2139
2140 dspc->nr_tasks += dspc->cursor;
2141 dspc->cursor = 0;
2142 }
2143
maybe_queue_balance_callback(struct rq * rq)2144 static inline void maybe_queue_balance_callback(struct rq *rq)
2145 {
2146 lockdep_assert_rq_held(rq);
2147
2148 if (!(rq->scx.flags & SCX_RQ_BAL_CB_PENDING))
2149 return;
2150
2151 queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
2152 deferred_bal_cb_workfn);
2153
2154 rq->scx.flags &= ~SCX_RQ_BAL_CB_PENDING;
2155 }
2156
balance_one(struct rq * rq,struct task_struct * prev)2157 static int balance_one(struct rq *rq, struct task_struct *prev)
2158 {
2159 struct scx_sched *sch = scx_root;
2160 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
2161 bool prev_on_scx = prev->sched_class == &ext_sched_class;
2162 bool prev_on_rq = prev->scx.flags & SCX_TASK_QUEUED;
2163 int nr_loops = SCX_DSP_MAX_LOOPS;
2164
2165 lockdep_assert_rq_held(rq);
2166 rq->scx.flags |= SCX_RQ_IN_BALANCE;
2167 rq->scx.flags &= ~SCX_RQ_BAL_KEEP;
2168
2169 if ((sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT) &&
2170 unlikely(rq->scx.cpu_released)) {
2171 /*
2172 * If the previous sched_class for the current CPU was not SCX,
2173 * notify the BPF scheduler that it again has control of the
2174 * core. This callback complements ->cpu_release(), which is
2175 * emitted in switch_class().
2176 */
2177 if (SCX_HAS_OP(sch, cpu_acquire))
2178 SCX_CALL_OP(sch, SCX_KF_REST, cpu_acquire, rq,
2179 cpu_of(rq), NULL);
2180 rq->scx.cpu_released = false;
2181 }
2182
2183 if (prev_on_scx) {
2184 update_curr_scx(rq);
2185
2186 /*
2187 * If @prev is runnable & has slice left, it has priority and
2188 * fetching more just increases latency for the fetched tasks.
2189 * Tell pick_task_scx() to keep running @prev. If the BPF
2190 * scheduler wants to handle this explicitly, it should
2191 * implement ->cpu_release().
2192 *
2193 * See scx_disable_workfn() for the explanation on the bypassing
2194 * test.
2195 */
2196 if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) {
2197 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2198 goto has_tasks;
2199 }
2200 }
2201
2202 /* if there already are tasks to run, nothing to do */
2203 if (rq->scx.local_dsq.nr)
2204 goto has_tasks;
2205
2206 if (consume_global_dsq(sch, rq))
2207 goto has_tasks;
2208
2209 if (scx_rq_bypassing(rq)) {
2210 if (consume_dispatch_q(sch, rq, &rq->scx.bypass_dsq))
2211 goto has_tasks;
2212 else
2213 goto no_tasks;
2214 }
2215
2216 if (unlikely(!SCX_HAS_OP(sch, dispatch)) || !scx_rq_online(rq))
2217 goto no_tasks;
2218
2219 dspc->rq = rq;
2220
2221 /*
2222 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock,
2223 * the local DSQ might still end up empty after a successful
2224 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch()
2225 * produced some tasks, retry. The BPF scheduler may depend on this
2226 * looping behavior to simplify its implementation.
2227 */
2228 do {
2229 dspc->nr_tasks = 0;
2230
2231 SCX_CALL_OP(sch, SCX_KF_DISPATCH, dispatch, rq,
2232 cpu_of(rq), prev_on_scx ? prev : NULL);
2233
2234 flush_dispatch_buf(sch, rq);
2235
2236 if (prev_on_rq && prev->scx.slice) {
2237 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2238 goto has_tasks;
2239 }
2240 if (rq->scx.local_dsq.nr)
2241 goto has_tasks;
2242 if (consume_global_dsq(sch, rq))
2243 goto has_tasks;
2244
2245 /*
2246 * ops.dispatch() can trap us in this loop by repeatedly
2247 * dispatching ineligible tasks. Break out once in a while to
2248 * allow the watchdog to run. As IRQ can't be enabled in
2249 * balance(), we want to complete this scheduling cycle and then
2250 * start a new one. IOW, we want to call resched_curr() on the
2251 * next, most likely idle, task, not the current one. Use
2252 * scx_kick_cpu() for deferred kicking.
2253 */
2254 if (unlikely(!--nr_loops)) {
2255 scx_kick_cpu(sch, cpu_of(rq), 0);
2256 break;
2257 }
2258 } while (dspc->nr_tasks);
2259
2260 no_tasks:
2261 /*
2262 * Didn't find another task to run. Keep running @prev unless
2263 * %SCX_OPS_ENQ_LAST is in effect.
2264 */
2265 if (prev_on_rq &&
2266 (!(sch->ops.flags & SCX_OPS_ENQ_LAST) || scx_rq_bypassing(rq))) {
2267 rq->scx.flags |= SCX_RQ_BAL_KEEP;
2268 __scx_add_event(sch, SCX_EV_DISPATCH_KEEP_LAST, 1);
2269 goto has_tasks;
2270 }
2271 rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2272 return false;
2273
2274 has_tasks:
2275 rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
2276 return true;
2277 }
2278
process_ddsp_deferred_locals(struct rq * rq)2279 static void process_ddsp_deferred_locals(struct rq *rq)
2280 {
2281 struct task_struct *p;
2282
2283 lockdep_assert_rq_held(rq);
2284
2285 /*
2286 * Now that @rq can be unlocked, execute the deferred enqueueing of
2287 * tasks directly dispatched to the local DSQs of other CPUs. See
2288 * direct_dispatch(). Keep popping from the head instead of using
2289 * list_for_each_entry_safe() as dispatch_local_dsq() may unlock @rq
2290 * temporarily.
2291 */
2292 while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
2293 struct task_struct, scx.dsq_list.node))) {
2294 struct scx_sched *sch = scx_root;
2295 struct scx_dispatch_q *dsq;
2296
2297 list_del_init(&p->scx.dsq_list.node);
2298
2299 dsq = find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
2300 if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
2301 dispatch_to_local_dsq(sch, rq, dsq, p,
2302 p->scx.ddsp_enq_flags);
2303 }
2304 }
2305
set_next_task_scx(struct rq * rq,struct task_struct * p,bool first)2306 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
2307 {
2308 struct scx_sched *sch = scx_root;
2309
2310 if (p->scx.flags & SCX_TASK_QUEUED) {
2311 /*
2312 * Core-sched might decide to execute @p before it is
2313 * dispatched. Call ops_dequeue() to notify the BPF scheduler.
2314 */
2315 ops_dequeue(rq, p, SCX_DEQ_CORE_SCHED_EXEC);
2316 dispatch_dequeue(rq, p);
2317 }
2318
2319 p->se.exec_start = rq_clock_task(rq);
2320
2321 /* see dequeue_task_scx() on why we skip when !QUEUED */
2322 if (SCX_HAS_OP(sch, running) && (p->scx.flags & SCX_TASK_QUEUED))
2323 SCX_CALL_OP_TASK(sch, SCX_KF_REST, running, rq, p);
2324
2325 clr_task_runnable(p, true);
2326
2327 /*
2328 * @p is getting newly scheduled or got kicked after someone updated its
2329 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
2330 */
2331 if ((p->scx.slice == SCX_SLICE_INF) !=
2332 (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
2333 if (p->scx.slice == SCX_SLICE_INF)
2334 rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
2335 else
2336 rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
2337
2338 sched_update_tick_dependency(rq);
2339
2340 /*
2341 * For now, let's refresh the load_avgs just when transitioning
2342 * in and out of nohz. In the future, we might want to add a
2343 * mechanism which calls the following periodically on
2344 * tick-stopped CPUs.
2345 */
2346 update_other_load_avgs(rq);
2347 }
2348 }
2349
2350 static enum scx_cpu_preempt_reason
preempt_reason_from_class(const struct sched_class * class)2351 preempt_reason_from_class(const struct sched_class *class)
2352 {
2353 if (class == &stop_sched_class)
2354 return SCX_CPU_PREEMPT_STOP;
2355 if (class == &dl_sched_class)
2356 return SCX_CPU_PREEMPT_DL;
2357 if (class == &rt_sched_class)
2358 return SCX_CPU_PREEMPT_RT;
2359 return SCX_CPU_PREEMPT_UNKNOWN;
2360 }
2361
switch_class(struct rq * rq,struct task_struct * next)2362 static void switch_class(struct rq *rq, struct task_struct *next)
2363 {
2364 struct scx_sched *sch = scx_root;
2365 const struct sched_class *next_class = next->sched_class;
2366
2367 if (!(sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT))
2368 return;
2369
2370 /*
2371 * The callback is conceptually meant to convey that the CPU is no
2372 * longer under the control of SCX. Therefore, don't invoke the callback
2373 * if the next class is below SCX (in which case the BPF scheduler has
2374 * actively decided not to schedule any tasks on the CPU).
2375 */
2376 if (sched_class_above(&ext_sched_class, next_class))
2377 return;
2378
2379 /*
2380 * At this point we know that SCX was preempted by a higher priority
2381 * sched_class, so invoke the ->cpu_release() callback if we have not
2382 * done so already. We only send the callback once between SCX being
2383 * preempted, and it regaining control of the CPU.
2384 *
2385 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
2386 * next time that balance_one() is invoked.
2387 */
2388 if (!rq->scx.cpu_released) {
2389 if (SCX_HAS_OP(sch, cpu_release)) {
2390 struct scx_cpu_release_args args = {
2391 .reason = preempt_reason_from_class(next_class),
2392 .task = next,
2393 };
2394
2395 SCX_CALL_OP(sch, SCX_KF_CPU_RELEASE, cpu_release, rq,
2396 cpu_of(rq), &args);
2397 }
2398 rq->scx.cpu_released = true;
2399 }
2400 }
2401
put_prev_task_scx(struct rq * rq,struct task_struct * p,struct task_struct * next)2402 static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
2403 struct task_struct *next)
2404 {
2405 struct scx_sched *sch = scx_root;
2406
2407 /* see kick_sync_wait_bal_cb() */
2408 smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1);
2409
2410 update_curr_scx(rq);
2411
2412 /* see dequeue_task_scx() on why we skip when !QUEUED */
2413 if (SCX_HAS_OP(sch, stopping) && (p->scx.flags & SCX_TASK_QUEUED))
2414 SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, true);
2415
2416 if (p->scx.flags & SCX_TASK_QUEUED) {
2417 set_task_runnable(rq, p);
2418
2419 /*
2420 * If @p has slice left and is being put, @p is getting
2421 * preempted by a higher priority scheduler class or core-sched
2422 * forcing a different task. Leave it at the head of the local
2423 * DSQ.
2424 */
2425 if (p->scx.slice && !scx_rq_bypassing(rq)) {
2426 dispatch_enqueue(sch, &rq->scx.local_dsq, p,
2427 SCX_ENQ_HEAD);
2428 goto switch_class;
2429 }
2430
2431 /*
2432 * If @p is runnable but we're about to enter a lower
2433 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell
2434 * ops.enqueue() that @p is the only one available for this cpu,
2435 * which should trigger an explicit follow-up scheduling event.
2436 */
2437 if (next && sched_class_above(&ext_sched_class, next->sched_class)) {
2438 WARN_ON_ONCE(!(sch->ops.flags & SCX_OPS_ENQ_LAST));
2439 do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
2440 } else {
2441 do_enqueue_task(rq, p, 0, -1);
2442 }
2443 }
2444
2445 switch_class:
2446 if (next && next->sched_class != &ext_sched_class)
2447 switch_class(rq, next);
2448 }
2449
kick_sync_wait_bal_cb(struct rq * rq)2450 static void kick_sync_wait_bal_cb(struct rq *rq)
2451 {
2452 struct scx_kick_syncs __rcu *ks = __this_cpu_read(scx_kick_syncs);
2453 unsigned long *ksyncs = rcu_dereference_sched(ks)->syncs;
2454 bool waited;
2455 s32 cpu;
2456
2457 /*
2458 * Drop rq lock and enable IRQs while waiting. IRQs must be enabled
2459 * — a target CPU may be waiting for us to process an IPI (e.g. TLB
2460 * flush) while we wait for its kick_sync to advance.
2461 *
2462 * Also, keep advancing our own kick_sync so that new kick_sync waits
2463 * targeting us, which can start after we drop the lock, cannot form
2464 * cyclic dependencies.
2465 */
2466 retry:
2467 waited = false;
2468 for_each_cpu(cpu, rq->scx.cpus_to_sync) {
2469 /*
2470 * smp_load_acquire() pairs with smp_store_release() on
2471 * kick_sync updates on the target CPUs.
2472 */
2473 if (cpu == cpu_of(rq) ||
2474 smp_load_acquire(&cpu_rq(cpu)->scx.kick_sync) != ksyncs[cpu]) {
2475 cpumask_clear_cpu(cpu, rq->scx.cpus_to_sync);
2476 continue;
2477 }
2478
2479 raw_spin_rq_unlock_irq(rq);
2480 while (READ_ONCE(cpu_rq(cpu)->scx.kick_sync) == ksyncs[cpu]) {
2481 smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1);
2482 cpu_relax();
2483 }
2484 raw_spin_rq_lock_irq(rq);
2485 waited = true;
2486 }
2487
2488 if (waited)
2489 goto retry;
2490 }
2491
first_local_task(struct rq * rq)2492 static struct task_struct *first_local_task(struct rq *rq)
2493 {
2494 return list_first_entry_or_null(&rq->scx.local_dsq.list,
2495 struct task_struct, scx.dsq_list.node);
2496 }
2497
2498 static struct task_struct *
do_pick_task_scx(struct rq * rq,struct rq_flags * rf,bool force_scx)2499 do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx)
2500 {
2501 struct task_struct *prev = rq->curr;
2502 bool keep_prev;
2503 struct task_struct *p;
2504
2505 /* see kick_sync_wait_bal_cb() */
2506 smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1);
2507
2508 rq_modified_begin(rq, &ext_sched_class);
2509
2510 rq_unpin_lock(rq, rf);
2511 balance_one(rq, prev);
2512 rq_repin_lock(rq, rf);
2513 maybe_queue_balance_callback(rq);
2514
2515 /*
2516 * Defer to a balance callback which can drop rq lock and enable
2517 * IRQs. Waiting directly in the pick path would deadlock against
2518 * CPUs sending us IPIs (e.g. TLB flushes) while we wait for them.
2519 */
2520 if (unlikely(rq->scx.kick_sync_pending)) {
2521 rq->scx.kick_sync_pending = false;
2522 queue_balance_callback(rq, &rq->scx.kick_sync_bal_cb,
2523 kick_sync_wait_bal_cb);
2524 }
2525
2526 /*
2527 * If any higher-priority sched class enqueued a runnable task on
2528 * this rq during balance_one(), abort and return RETRY_TASK, so
2529 * that the scheduler loop can restart.
2530 *
2531 * If @force_scx is true, always try to pick a SCHED_EXT task,
2532 * regardless of any higher-priority sched classes activity.
2533 */
2534 if (!force_scx && rq_modified_above(rq, &ext_sched_class))
2535 return RETRY_TASK;
2536
2537 keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
2538 if (unlikely(keep_prev &&
2539 prev->sched_class != &ext_sched_class)) {
2540 WARN_ON_ONCE(scx_enable_state() == SCX_ENABLED);
2541 keep_prev = false;
2542 }
2543
2544 /*
2545 * If balance_one() is telling us to keep running @prev, replenish slice
2546 * if necessary and keep running @prev. Otherwise, pop the first one
2547 * from the local DSQ.
2548 */
2549 if (keep_prev) {
2550 p = prev;
2551 if (!p->scx.slice)
2552 refill_task_slice_dfl(rcu_dereference_sched(scx_root), p);
2553 } else {
2554 p = first_local_task(rq);
2555 if (!p)
2556 return NULL;
2557
2558 if (unlikely(!p->scx.slice)) {
2559 struct scx_sched *sch = rcu_dereference_sched(scx_root);
2560
2561 if (!scx_rq_bypassing(rq) && !sch->warned_zero_slice) {
2562 printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n",
2563 p->comm, p->pid, __func__);
2564 sch->warned_zero_slice = true;
2565 }
2566 refill_task_slice_dfl(sch, p);
2567 }
2568 }
2569
2570 return p;
2571 }
2572
pick_task_scx(struct rq * rq,struct rq_flags * rf)2573 static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf)
2574 {
2575 return do_pick_task_scx(rq, rf, false);
2576 }
2577
2578 /*
2579 * Select the next task to run from the ext scheduling class.
2580 *
2581 * Use do_pick_task_scx() directly with @force_scx enabled, since the
2582 * dl_server must always select a sched_ext task.
2583 */
2584 static struct task_struct *
ext_server_pick_task(struct sched_dl_entity * dl_se,struct rq_flags * rf)2585 ext_server_pick_task(struct sched_dl_entity *dl_se, struct rq_flags *rf)
2586 {
2587 if (!scx_enabled())
2588 return NULL;
2589
2590 return do_pick_task_scx(dl_se->rq, rf, true);
2591 }
2592
2593 /*
2594 * Initialize the ext server deadline entity.
2595 */
ext_server_init(struct rq * rq)2596 void ext_server_init(struct rq *rq)
2597 {
2598 struct sched_dl_entity *dl_se = &rq->ext_server;
2599
2600 init_dl_entity(dl_se);
2601
2602 dl_server_init(dl_se, rq, ext_server_pick_task);
2603 }
2604
2605 #ifdef CONFIG_SCHED_CORE
2606 /**
2607 * scx_prio_less - Task ordering for core-sched
2608 * @a: task A
2609 * @b: task B
2610 * @in_fi: in forced idle state
2611 *
2612 * Core-sched is implemented as an additional scheduling layer on top of the
2613 * usual sched_class'es and needs to find out the expected task ordering. For
2614 * SCX, core-sched calls this function to interrogate the task ordering.
2615 *
2616 * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
2617 * to implement the default task ordering. The older the timestamp, the higher
2618 * priority the task - the global FIFO ordering matching the default scheduling
2619 * behavior.
2620 *
2621 * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
2622 * implement FIFO ordering within each local DSQ. See pick_task_scx().
2623 */
scx_prio_less(const struct task_struct * a,const struct task_struct * b,bool in_fi)2624 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
2625 bool in_fi)
2626 {
2627 struct scx_sched *sch = scx_root;
2628
2629 /*
2630 * The const qualifiers are dropped from task_struct pointers when
2631 * calling ops.core_sched_before(). Accesses are controlled by the
2632 * verifier.
2633 */
2634 if (SCX_HAS_OP(sch, core_sched_before) &&
2635 !scx_rq_bypassing(task_rq(a)))
2636 return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, core_sched_before,
2637 NULL,
2638 (struct task_struct *)a,
2639 (struct task_struct *)b);
2640 else
2641 return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
2642 }
2643 #endif /* CONFIG_SCHED_CORE */
2644
select_task_rq_scx(struct task_struct * p,int prev_cpu,int wake_flags)2645 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
2646 {
2647 struct scx_sched *sch = scx_root;
2648 bool rq_bypass;
2649
2650 /*
2651 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it
2652 * can be a good migration opportunity with low cache and memory
2653 * footprint. Returning a CPU different than @prev_cpu triggers
2654 * immediate rq migration. However, for SCX, as the current rq
2655 * association doesn't dictate where the task is going to run, this
2656 * doesn't fit well. If necessary, we can later add a dedicated method
2657 * which can decide to preempt self to force it through the regular
2658 * scheduling path.
2659 */
2660 if (unlikely(wake_flags & WF_EXEC))
2661 return prev_cpu;
2662
2663 rq_bypass = scx_rq_bypassing(task_rq(p));
2664 if (likely(SCX_HAS_OP(sch, select_cpu)) && !rq_bypass) {
2665 s32 cpu;
2666 struct task_struct **ddsp_taskp;
2667
2668 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
2669 WARN_ON_ONCE(*ddsp_taskp);
2670 *ddsp_taskp = p;
2671
2672 cpu = SCX_CALL_OP_TASK_RET(sch,
2673 SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
2674 select_cpu, NULL, p, prev_cpu,
2675 wake_flags);
2676 p->scx.selected_cpu = cpu;
2677 *ddsp_taskp = NULL;
2678 if (ops_cpu_valid(sch, cpu, "from ops.select_cpu()"))
2679 return cpu;
2680 else
2681 return prev_cpu;
2682 } else {
2683 s32 cpu;
2684
2685 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0);
2686 if (cpu >= 0) {
2687 refill_task_slice_dfl(sch, p);
2688 p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
2689 } else {
2690 cpu = prev_cpu;
2691 }
2692 p->scx.selected_cpu = cpu;
2693
2694 if (rq_bypass)
2695 __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);
2696 return cpu;
2697 }
2698 }
2699
task_woken_scx(struct rq * rq,struct task_struct * p)2700 static void task_woken_scx(struct rq *rq, struct task_struct *p)
2701 {
2702 run_deferred(rq);
2703 }
2704
set_cpus_allowed_scx(struct task_struct * p,struct affinity_context * ac)2705 static void set_cpus_allowed_scx(struct task_struct *p,
2706 struct affinity_context *ac)
2707 {
2708 struct scx_sched *sch = scx_root;
2709
2710 set_cpus_allowed_common(p, ac);
2711
2712 if (task_dead_and_done(p))
2713 return;
2714
2715 /*
2716 * The effective cpumask is stored in @p->cpus_ptr which may temporarily
2717 * differ from the configured one in @p->cpus_mask. Always tell the bpf
2718 * scheduler the effective one.
2719 *
2720 * Fine-grained memory write control is enforced by BPF making the const
2721 * designation pointless. Cast it away when calling the operation.
2722 */
2723 if (SCX_HAS_OP(sch, set_cpumask))
2724 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, NULL,
2725 p, (struct cpumask *)p->cpus_ptr);
2726 }
2727
handle_hotplug(struct rq * rq,bool online)2728 static void handle_hotplug(struct rq *rq, bool online)
2729 {
2730 struct scx_sched *sch = scx_root;
2731 int cpu = cpu_of(rq);
2732
2733 atomic_long_inc(&scx_hotplug_seq);
2734
2735 /*
2736 * scx_root updates are protected by cpus_read_lock() and will stay
2737 * stable here. Note that we can't depend on scx_enabled() test as the
2738 * hotplug ops need to be enabled before __scx_enabled is set.
2739 */
2740 if (unlikely(!sch))
2741 return;
2742
2743 if (scx_enabled())
2744 scx_idle_update_selcpu_topology(&sch->ops);
2745
2746 if (online && SCX_HAS_OP(sch, cpu_online))
2747 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_online, NULL, cpu);
2748 else if (!online && SCX_HAS_OP(sch, cpu_offline))
2749 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_offline, NULL, cpu);
2750 else
2751 scx_exit(sch, SCX_EXIT_UNREG_KERN,
2752 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
2753 "cpu %d going %s, exiting scheduler", cpu,
2754 online ? "online" : "offline");
2755 }
2756
scx_rq_activate(struct rq * rq)2757 void scx_rq_activate(struct rq *rq)
2758 {
2759 handle_hotplug(rq, true);
2760 }
2761
scx_rq_deactivate(struct rq * rq)2762 void scx_rq_deactivate(struct rq *rq)
2763 {
2764 handle_hotplug(rq, false);
2765 }
2766
rq_online_scx(struct rq * rq)2767 static void rq_online_scx(struct rq *rq)
2768 {
2769 rq->scx.flags |= SCX_RQ_ONLINE;
2770 }
2771
rq_offline_scx(struct rq * rq)2772 static void rq_offline_scx(struct rq *rq)
2773 {
2774 rq->scx.flags &= ~SCX_RQ_ONLINE;
2775 }
2776
2777
check_rq_for_timeouts(struct rq * rq)2778 static bool check_rq_for_timeouts(struct rq *rq)
2779 {
2780 struct scx_sched *sch;
2781 struct task_struct *p;
2782 struct rq_flags rf;
2783 bool timed_out = false;
2784
2785 rq_lock_irqsave(rq, &rf);
2786 sch = rcu_dereference_bh(scx_root);
2787 if (unlikely(!sch))
2788 goto out_unlock;
2789
2790 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
2791 unsigned long last_runnable = p->scx.runnable_at;
2792
2793 if (unlikely(time_after(jiffies,
2794 last_runnable + READ_ONCE(scx_watchdog_timeout)))) {
2795 u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
2796
2797 scx_exit(sch, SCX_EXIT_ERROR_STALL, 0,
2798 "%s[%d] failed to run for %u.%03us",
2799 p->comm, p->pid, dur_ms / 1000, dur_ms % 1000);
2800 timed_out = true;
2801 break;
2802 }
2803 }
2804 out_unlock:
2805 rq_unlock_irqrestore(rq, &rf);
2806 return timed_out;
2807 }
2808
scx_watchdog_workfn(struct work_struct * work)2809 static void scx_watchdog_workfn(struct work_struct *work)
2810 {
2811 int cpu;
2812
2813 WRITE_ONCE(scx_watchdog_timestamp, jiffies);
2814
2815 for_each_online_cpu(cpu) {
2816 if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
2817 break;
2818
2819 cond_resched();
2820 }
2821 queue_delayed_work(system_unbound_wq, to_delayed_work(work),
2822 READ_ONCE(scx_watchdog_timeout) / 2);
2823 }
2824
scx_tick(struct rq * rq)2825 void scx_tick(struct rq *rq)
2826 {
2827 struct scx_sched *sch;
2828 unsigned long last_check;
2829
2830 if (!scx_enabled())
2831 return;
2832
2833 sch = rcu_dereference_bh(scx_root);
2834 if (unlikely(!sch))
2835 return;
2836
2837 last_check = READ_ONCE(scx_watchdog_timestamp);
2838 if (unlikely(time_after(jiffies,
2839 last_check + READ_ONCE(scx_watchdog_timeout)))) {
2840 u32 dur_ms = jiffies_to_msecs(jiffies - last_check);
2841
2842 scx_exit(sch, SCX_EXIT_ERROR_STALL, 0,
2843 "watchdog failed to check in for %u.%03us",
2844 dur_ms / 1000, dur_ms % 1000);
2845 }
2846
2847 update_other_load_avgs(rq);
2848 }
2849
task_tick_scx(struct rq * rq,struct task_struct * curr,int queued)2850 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
2851 {
2852 struct scx_sched *sch = scx_root;
2853
2854 update_curr_scx(rq);
2855
2856 /*
2857 * While disabling, always resched and refresh core-sched timestamp as
2858 * we can't trust the slice management or ops.core_sched_before().
2859 */
2860 if (scx_rq_bypassing(rq)) {
2861 curr->scx.slice = 0;
2862 touch_core_sched(rq, curr);
2863 } else if (SCX_HAS_OP(sch, tick)) {
2864 SCX_CALL_OP_TASK(sch, SCX_KF_REST, tick, rq, curr);
2865 }
2866
2867 if (!curr->scx.slice)
2868 resched_curr(rq);
2869 }
2870
2871 #ifdef CONFIG_EXT_GROUP_SCHED
tg_cgrp(struct task_group * tg)2872 static struct cgroup *tg_cgrp(struct task_group *tg)
2873 {
2874 /*
2875 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup,
2876 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the
2877 * root cgroup.
2878 */
2879 if (tg && tg->css.cgroup)
2880 return tg->css.cgroup;
2881 else
2882 return &cgrp_dfl_root.cgrp;
2883 }
2884
2885 #define SCX_INIT_TASK_ARGS_CGROUP(tg) .cgroup = tg_cgrp(tg),
2886
2887 #else /* CONFIG_EXT_GROUP_SCHED */
2888
2889 #define SCX_INIT_TASK_ARGS_CGROUP(tg)
2890
2891 #endif /* CONFIG_EXT_GROUP_SCHED */
2892
scx_get_task_state(const struct task_struct * p)2893 static enum scx_task_state scx_get_task_state(const struct task_struct *p)
2894 {
2895 return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
2896 }
2897
scx_set_task_state(struct task_struct * p,enum scx_task_state state)2898 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
2899 {
2900 enum scx_task_state prev_state = scx_get_task_state(p);
2901 bool warn = false;
2902
2903 BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS));
2904
2905 switch (state) {
2906 case SCX_TASK_NONE:
2907 break;
2908 case SCX_TASK_INIT:
2909 warn = prev_state != SCX_TASK_NONE;
2910 break;
2911 case SCX_TASK_READY:
2912 warn = prev_state == SCX_TASK_NONE;
2913 break;
2914 case SCX_TASK_ENABLED:
2915 warn = prev_state != SCX_TASK_READY;
2916 break;
2917 default:
2918 warn = true;
2919 return;
2920 }
2921
2922 WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
2923 prev_state, state, p->comm, p->pid);
2924
2925 p->scx.flags &= ~SCX_TASK_STATE_MASK;
2926 p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
2927 }
2928
scx_init_task(struct task_struct * p,struct task_group * tg,bool fork)2929 static int scx_init_task(struct task_struct *p, struct task_group *tg, bool fork)
2930 {
2931 struct scx_sched *sch = scx_root;
2932 int ret;
2933
2934 p->scx.disallow = false;
2935
2936 if (SCX_HAS_OP(sch, init_task)) {
2937 struct scx_init_task_args args = {
2938 SCX_INIT_TASK_ARGS_CGROUP(tg)
2939 .fork = fork,
2940 };
2941
2942 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init_task, NULL,
2943 p, &args);
2944 if (unlikely(ret)) {
2945 ret = ops_sanitize_err(sch, "init_task", ret);
2946 return ret;
2947 }
2948 }
2949
2950 scx_set_task_state(p, SCX_TASK_INIT);
2951
2952 if (p->scx.disallow) {
2953 if (!fork) {
2954 struct rq *rq;
2955 struct rq_flags rf;
2956
2957 rq = task_rq_lock(p, &rf);
2958
2959 /*
2960 * We're in the load path and @p->policy will be applied
2961 * right after. Reverting @p->policy here and rejecting
2962 * %SCHED_EXT transitions from scx_check_setscheduler()
2963 * guarantees that if ops.init_task() sets @p->disallow,
2964 * @p can never be in SCX.
2965 */
2966 if (p->policy == SCHED_EXT) {
2967 p->policy = SCHED_NORMAL;
2968 atomic_long_inc(&scx_nr_rejected);
2969 }
2970
2971 task_rq_unlock(rq, p, &rf);
2972 } else if (p->policy == SCHED_EXT) {
2973 scx_error(sch, "ops.init_task() set task->scx.disallow for %s[%d] during fork",
2974 p->comm, p->pid);
2975 }
2976 }
2977
2978 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
2979 return 0;
2980 }
2981
scx_enable_task(struct task_struct * p)2982 static void scx_enable_task(struct task_struct *p)
2983 {
2984 struct scx_sched *sch = scx_root;
2985 struct rq *rq = task_rq(p);
2986 u32 weight;
2987
2988 lockdep_assert_rq_held(rq);
2989
2990 /*
2991 * Set the weight before calling ops.enable() so that the scheduler
2992 * doesn't see a stale value if they inspect the task struct.
2993 */
2994 if (task_has_idle_policy(p))
2995 weight = WEIGHT_IDLEPRIO;
2996 else
2997 weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
2998
2999 p->scx.weight = sched_weight_to_cgroup(weight);
3000
3001 if (SCX_HAS_OP(sch, enable))
3002 SCX_CALL_OP_TASK(sch, SCX_KF_REST, enable, rq, p);
3003 scx_set_task_state(p, SCX_TASK_ENABLED);
3004
3005 if (SCX_HAS_OP(sch, set_weight))
3006 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq,
3007 p, p->scx.weight);
3008 }
3009
scx_disable_task(struct task_struct * p)3010 static void scx_disable_task(struct task_struct *p)
3011 {
3012 struct scx_sched *sch = scx_root;
3013 struct rq *rq = task_rq(p);
3014
3015 lockdep_assert_rq_held(rq);
3016 WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
3017
3018 if (SCX_HAS_OP(sch, disable))
3019 SCX_CALL_OP_TASK(sch, SCX_KF_REST, disable, rq, p);
3020 scx_set_task_state(p, SCX_TASK_READY);
3021 }
3022
scx_exit_task(struct task_struct * p)3023 static void scx_exit_task(struct task_struct *p)
3024 {
3025 struct scx_sched *sch = scx_root;
3026 struct scx_exit_task_args args = {
3027 .cancelled = false,
3028 };
3029
3030 lockdep_assert_rq_held(task_rq(p));
3031
3032 switch (scx_get_task_state(p)) {
3033 case SCX_TASK_NONE:
3034 return;
3035 case SCX_TASK_INIT:
3036 args.cancelled = true;
3037 break;
3038 case SCX_TASK_READY:
3039 break;
3040 case SCX_TASK_ENABLED:
3041 scx_disable_task(p);
3042 break;
3043 default:
3044 WARN_ON_ONCE(true);
3045 return;
3046 }
3047
3048 if (SCX_HAS_OP(sch, exit_task))
3049 SCX_CALL_OP_TASK(sch, SCX_KF_REST, exit_task, task_rq(p),
3050 p, &args);
3051 scx_set_task_state(p, SCX_TASK_NONE);
3052 }
3053
init_scx_entity(struct sched_ext_entity * scx)3054 void init_scx_entity(struct sched_ext_entity *scx)
3055 {
3056 memset(scx, 0, sizeof(*scx));
3057 INIT_LIST_HEAD(&scx->dsq_list.node);
3058 RB_CLEAR_NODE(&scx->dsq_priq);
3059 scx->sticky_cpu = -1;
3060 scx->holding_cpu = -1;
3061 INIT_LIST_HEAD(&scx->runnable_node);
3062 scx->runnable_at = jiffies;
3063 scx->ddsp_dsq_id = SCX_DSQ_INVALID;
3064 scx->slice = READ_ONCE(scx_slice_dfl);
3065 }
3066
scx_pre_fork(struct task_struct * p)3067 void scx_pre_fork(struct task_struct *p)
3068 {
3069 /*
3070 * BPF scheduler enable/disable paths want to be able to iterate and
3071 * update all tasks which can become complex when racing forks. As
3072 * enable/disable are very cold paths, let's use a percpu_rwsem to
3073 * exclude forks.
3074 */
3075 percpu_down_read(&scx_fork_rwsem);
3076 }
3077
scx_fork(struct task_struct * p)3078 int scx_fork(struct task_struct *p)
3079 {
3080 percpu_rwsem_assert_held(&scx_fork_rwsem);
3081
3082 if (scx_init_task_enabled)
3083 return scx_init_task(p, task_group(p), true);
3084 else
3085 return 0;
3086 }
3087
scx_post_fork(struct task_struct * p)3088 void scx_post_fork(struct task_struct *p)
3089 {
3090 if (scx_init_task_enabled) {
3091 scx_set_task_state(p, SCX_TASK_READY);
3092
3093 /*
3094 * Enable the task immediately if it's running on sched_ext.
3095 * Otherwise, it'll be enabled in switching_to_scx() if and
3096 * when it's ever configured to run with a SCHED_EXT policy.
3097 */
3098 if (p->sched_class == &ext_sched_class) {
3099 struct rq_flags rf;
3100 struct rq *rq;
3101
3102 rq = task_rq_lock(p, &rf);
3103 scx_enable_task(p);
3104 task_rq_unlock(rq, p, &rf);
3105 }
3106 }
3107
3108 raw_spin_lock_irq(&scx_tasks_lock);
3109 list_add_tail(&p->scx.tasks_node, &scx_tasks);
3110 raw_spin_unlock_irq(&scx_tasks_lock);
3111
3112 percpu_up_read(&scx_fork_rwsem);
3113 }
3114
scx_cancel_fork(struct task_struct * p)3115 void scx_cancel_fork(struct task_struct *p)
3116 {
3117 if (scx_enabled()) {
3118 struct rq *rq;
3119 struct rq_flags rf;
3120
3121 rq = task_rq_lock(p, &rf);
3122 WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
3123 scx_exit_task(p);
3124 task_rq_unlock(rq, p, &rf);
3125 }
3126
3127 percpu_up_read(&scx_fork_rwsem);
3128 }
3129
3130 /**
3131 * task_dead_and_done - Is a task dead and done running?
3132 * @p: target task
3133 *
3134 * Once sched_ext_dead() removes the dead task from scx_tasks and exits it, the
3135 * task no longer exists from SCX's POV. However, certain sched_class ops may be
3136 * invoked on these dead tasks leading to failures - e.g. sched_setscheduler()
3137 * may try to switch a task which finished sched_ext_dead() back into SCX
3138 * triggering invalid SCX task state transitions and worse.
3139 *
3140 * Once a task has finished the final switch, sched_ext_dead() is the only thing
3141 * that needs to happen on the task. Use this test to short-circuit sched_class
3142 * operations which may be called on dead tasks.
3143 */
task_dead_and_done(struct task_struct * p)3144 static bool task_dead_and_done(struct task_struct *p)
3145 {
3146 struct rq *rq = task_rq(p);
3147
3148 lockdep_assert_rq_held(rq);
3149
3150 /*
3151 * In do_task_dead(), a dying task sets %TASK_DEAD with preemption
3152 * disabled and __schedule(). If @p has %TASK_DEAD set and off CPU, @p
3153 * won't ever run again.
3154 */
3155 return unlikely(READ_ONCE(p->__state) == TASK_DEAD) &&
3156 !task_on_cpu(rq, p);
3157 }
3158
sched_ext_dead(struct task_struct * p)3159 void sched_ext_dead(struct task_struct *p)
3160 {
3161 unsigned long flags;
3162
3163 /*
3164 * By the time control reaches here, @p has %TASK_DEAD set, switched out
3165 * for the last time and then dropped the rq lock - task_dead_and_done()
3166 * should be returning %true nullifying the straggling sched_class ops.
3167 * Remove from scx_tasks and exit @p.
3168 */
3169 raw_spin_lock_irqsave(&scx_tasks_lock, flags);
3170 list_del_init(&p->scx.tasks_node);
3171 raw_spin_unlock_irqrestore(&scx_tasks_lock, flags);
3172
3173 /*
3174 * @p is off scx_tasks and wholly ours. scx_enable()'s READY -> ENABLED
3175 * transitions can't race us. Disable ops for @p.
3176 */
3177 if (scx_get_task_state(p) != SCX_TASK_NONE) {
3178 struct rq_flags rf;
3179 struct rq *rq;
3180
3181 rq = task_rq_lock(p, &rf);
3182 scx_exit_task(p);
3183 task_rq_unlock(rq, p, &rf);
3184 }
3185 }
3186
reweight_task_scx(struct rq * rq,struct task_struct * p,const struct load_weight * lw)3187 static void reweight_task_scx(struct rq *rq, struct task_struct *p,
3188 const struct load_weight *lw)
3189 {
3190 struct scx_sched *sch = scx_root;
3191
3192 lockdep_assert_rq_held(task_rq(p));
3193
3194 if (task_dead_and_done(p))
3195 return;
3196
3197 p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
3198 if (SCX_HAS_OP(sch, set_weight))
3199 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq,
3200 p, p->scx.weight);
3201 }
3202
prio_changed_scx(struct rq * rq,struct task_struct * p,u64 oldprio)3203 static void prio_changed_scx(struct rq *rq, struct task_struct *p, u64 oldprio)
3204 {
3205 }
3206
switching_to_scx(struct rq * rq,struct task_struct * p)3207 static void switching_to_scx(struct rq *rq, struct task_struct *p)
3208 {
3209 struct scx_sched *sch = scx_root;
3210
3211 if (task_dead_and_done(p))
3212 return;
3213
3214 scx_enable_task(p);
3215
3216 /*
3217 * set_cpus_allowed_scx() is not called while @p is associated with a
3218 * different scheduler class. Keep the BPF scheduler up-to-date.
3219 */
3220 if (SCX_HAS_OP(sch, set_cpumask))
3221 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, rq,
3222 p, (struct cpumask *)p->cpus_ptr);
3223 }
3224
switched_from_scx(struct rq * rq,struct task_struct * p)3225 static void switched_from_scx(struct rq *rq, struct task_struct *p)
3226 {
3227 if (task_dead_and_done(p))
3228 return;
3229
3230 scx_disable_task(p);
3231 }
3232
wakeup_preempt_scx(struct rq * rq,struct task_struct * p,int wake_flags)3233 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags) {}
3234
switched_to_scx(struct rq * rq,struct task_struct * p)3235 static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
3236
scx_check_setscheduler(struct task_struct * p,int policy)3237 int scx_check_setscheduler(struct task_struct *p, int policy)
3238 {
3239 lockdep_assert_rq_held(task_rq(p));
3240
3241 /* if disallow, reject transitioning into SCX */
3242 if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
3243 p->policy != policy && policy == SCHED_EXT)
3244 return -EACCES;
3245
3246 return 0;
3247 }
3248
3249 #ifdef CONFIG_NO_HZ_FULL
scx_can_stop_tick(struct rq * rq)3250 bool scx_can_stop_tick(struct rq *rq)
3251 {
3252 struct task_struct *p = rq->curr;
3253
3254 if (scx_rq_bypassing(rq))
3255 return false;
3256
3257 if (p->sched_class != &ext_sched_class)
3258 return true;
3259
3260 /*
3261 * @rq can dispatch from different DSQs, so we can't tell whether it
3262 * needs the tick or not by looking at nr_running. Allow stopping ticks
3263 * iff the BPF scheduler indicated so. See set_next_task_scx().
3264 */
3265 return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
3266 }
3267 #endif
3268
3269 #ifdef CONFIG_EXT_GROUP_SCHED
3270
3271 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_ops_rwsem);
3272 static bool scx_cgroup_enabled;
3273
scx_tg_init(struct task_group * tg)3274 void scx_tg_init(struct task_group *tg)
3275 {
3276 tg->scx.weight = CGROUP_WEIGHT_DFL;
3277 tg->scx.bw_period_us = default_bw_period_us();
3278 tg->scx.bw_quota_us = RUNTIME_INF;
3279 tg->scx.idle = false;
3280 }
3281
scx_tg_online(struct task_group * tg)3282 int scx_tg_online(struct task_group *tg)
3283 {
3284 struct scx_sched *sch = scx_root;
3285 int ret = 0;
3286
3287 WARN_ON_ONCE(tg->scx.flags & (SCX_TG_ONLINE | SCX_TG_INITED));
3288
3289 if (scx_cgroup_enabled) {
3290 if (SCX_HAS_OP(sch, cgroup_init)) {
3291 struct scx_cgroup_init_args args =
3292 { .weight = tg->scx.weight,
3293 .bw_period_us = tg->scx.bw_period_us,
3294 .bw_quota_us = tg->scx.bw_quota_us,
3295 .bw_burst_us = tg->scx.bw_burst_us };
3296
3297 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init,
3298 NULL, tg->css.cgroup, &args);
3299 if (ret)
3300 ret = ops_sanitize_err(sch, "cgroup_init", ret);
3301 }
3302 if (ret == 0)
3303 tg->scx.flags |= SCX_TG_ONLINE | SCX_TG_INITED;
3304 } else {
3305 tg->scx.flags |= SCX_TG_ONLINE;
3306 }
3307
3308 return ret;
3309 }
3310
scx_tg_offline(struct task_group * tg)3311 void scx_tg_offline(struct task_group *tg)
3312 {
3313 struct scx_sched *sch = scx_root;
3314
3315 WARN_ON_ONCE(!(tg->scx.flags & SCX_TG_ONLINE));
3316
3317 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_exit) &&
3318 (tg->scx.flags & SCX_TG_INITED))
3319 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL,
3320 tg->css.cgroup);
3321 tg->scx.flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
3322 }
3323
scx_cgroup_can_attach(struct cgroup_taskset * tset)3324 int scx_cgroup_can_attach(struct cgroup_taskset *tset)
3325 {
3326 struct scx_sched *sch = scx_root;
3327 struct cgroup_subsys_state *css;
3328 struct task_struct *p;
3329 int ret;
3330
3331 if (!scx_cgroup_enabled)
3332 return 0;
3333
3334 cgroup_taskset_for_each(p, css, tset) {
3335 struct cgroup *from = tg_cgrp(task_group(p));
3336 struct cgroup *to = tg_cgrp(css_tg(css));
3337
3338 WARN_ON_ONCE(p->scx.cgrp_moving_from);
3339
3340 /*
3341 * sched_move_task() omits identity migrations. Let's match the
3342 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move()
3343 * always match one-to-one.
3344 */
3345 if (from == to)
3346 continue;
3347
3348 if (SCX_HAS_OP(sch, cgroup_prep_move)) {
3349 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED,
3350 cgroup_prep_move, NULL,
3351 p, from, css->cgroup);
3352 if (ret)
3353 goto err;
3354 }
3355
3356 p->scx.cgrp_moving_from = from;
3357 }
3358
3359 return 0;
3360
3361 err:
3362 cgroup_taskset_for_each(p, css, tset) {
3363 if (SCX_HAS_OP(sch, cgroup_cancel_move) &&
3364 p->scx.cgrp_moving_from)
3365 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL,
3366 p, p->scx.cgrp_moving_from, css->cgroup);
3367 p->scx.cgrp_moving_from = NULL;
3368 }
3369
3370 return ops_sanitize_err(sch, "cgroup_prep_move", ret);
3371 }
3372
scx_cgroup_move_task(struct task_struct * p)3373 void scx_cgroup_move_task(struct task_struct *p)
3374 {
3375 struct scx_sched *sch = scx_root;
3376
3377 if (!scx_cgroup_enabled)
3378 return;
3379
3380 /*
3381 * @p must have ops.cgroup_prep_move() called on it and thus
3382 * cgrp_moving_from set.
3383 */
3384 if (SCX_HAS_OP(sch, cgroup_move) &&
3385 !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
3386 SCX_CALL_OP_TASK(sch, SCX_KF_UNLOCKED, cgroup_move, NULL,
3387 p, p->scx.cgrp_moving_from,
3388 tg_cgrp(task_group(p)));
3389 p->scx.cgrp_moving_from = NULL;
3390 }
3391
scx_cgroup_cancel_attach(struct cgroup_taskset * tset)3392 void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
3393 {
3394 struct scx_sched *sch = scx_root;
3395 struct cgroup_subsys_state *css;
3396 struct task_struct *p;
3397
3398 if (!scx_cgroup_enabled)
3399 return;
3400
3401 cgroup_taskset_for_each(p, css, tset) {
3402 if (SCX_HAS_OP(sch, cgroup_cancel_move) &&
3403 p->scx.cgrp_moving_from)
3404 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL,
3405 p, p->scx.cgrp_moving_from, css->cgroup);
3406 p->scx.cgrp_moving_from = NULL;
3407 }
3408 }
3409
scx_group_set_weight(struct task_group * tg,unsigned long weight)3410 void scx_group_set_weight(struct task_group *tg, unsigned long weight)
3411 {
3412 struct scx_sched *sch = scx_root;
3413
3414 percpu_down_read(&scx_cgroup_ops_rwsem);
3415
3416 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_weight) &&
3417 tg->scx.weight != weight)
3418 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_weight, NULL,
3419 tg_cgrp(tg), weight);
3420
3421 tg->scx.weight = weight;
3422
3423 percpu_up_read(&scx_cgroup_ops_rwsem);
3424 }
3425
scx_group_set_idle(struct task_group * tg,bool idle)3426 void scx_group_set_idle(struct task_group *tg, bool idle)
3427 {
3428 struct scx_sched *sch = scx_root;
3429
3430 percpu_down_read(&scx_cgroup_ops_rwsem);
3431
3432 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_idle))
3433 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_idle, NULL,
3434 tg_cgrp(tg), idle);
3435
3436 /* Update the task group's idle state */
3437 tg->scx.idle = idle;
3438
3439 percpu_up_read(&scx_cgroup_ops_rwsem);
3440 }
3441
scx_group_set_bandwidth(struct task_group * tg,u64 period_us,u64 quota_us,u64 burst_us)3442 void scx_group_set_bandwidth(struct task_group *tg,
3443 u64 period_us, u64 quota_us, u64 burst_us)
3444 {
3445 struct scx_sched *sch = scx_root;
3446
3447 percpu_down_read(&scx_cgroup_ops_rwsem);
3448
3449 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_bandwidth) &&
3450 (tg->scx.bw_period_us != period_us ||
3451 tg->scx.bw_quota_us != quota_us ||
3452 tg->scx.bw_burst_us != burst_us))
3453 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_bandwidth, NULL,
3454 tg_cgrp(tg), period_us, quota_us, burst_us);
3455
3456 tg->scx.bw_period_us = period_us;
3457 tg->scx.bw_quota_us = quota_us;
3458 tg->scx.bw_burst_us = burst_us;
3459
3460 percpu_up_read(&scx_cgroup_ops_rwsem);
3461 }
3462
scx_cgroup_lock(void)3463 static void scx_cgroup_lock(void)
3464 {
3465 percpu_down_write(&scx_cgroup_ops_rwsem);
3466 cgroup_lock();
3467 }
3468
scx_cgroup_unlock(void)3469 static void scx_cgroup_unlock(void)
3470 {
3471 cgroup_unlock();
3472 percpu_up_write(&scx_cgroup_ops_rwsem);
3473 }
3474
3475 #else /* CONFIG_EXT_GROUP_SCHED */
3476
scx_cgroup_lock(void)3477 static void scx_cgroup_lock(void) {}
scx_cgroup_unlock(void)3478 static void scx_cgroup_unlock(void) {}
3479
3480 #endif /* CONFIG_EXT_GROUP_SCHED */
3481
3482 /*
3483 * Omitted operations:
3484 *
3485 * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task
3486 * isn't tied to the CPU at that point. Preemption is implemented by resetting
3487 * the victim task's slice to 0 and triggering reschedule on the target CPU.
3488 *
3489 * - migrate_task_rq: Unnecessary as task to cpu mapping is transient.
3490 *
3491 * - task_fork/dead: We need fork/dead notifications for all tasks regardless of
3492 * their current sched_class. Call them directly from sched core instead.
3493 */
3494 DEFINE_SCHED_CLASS(ext) = {
3495 .enqueue_task = enqueue_task_scx,
3496 .dequeue_task = dequeue_task_scx,
3497 .yield_task = yield_task_scx,
3498 .yield_to_task = yield_to_task_scx,
3499
3500 .wakeup_preempt = wakeup_preempt_scx,
3501
3502 .pick_task = pick_task_scx,
3503
3504 .put_prev_task = put_prev_task_scx,
3505 .set_next_task = set_next_task_scx,
3506
3507 .select_task_rq = select_task_rq_scx,
3508 .task_woken = task_woken_scx,
3509 .set_cpus_allowed = set_cpus_allowed_scx,
3510
3511 .rq_online = rq_online_scx,
3512 .rq_offline = rq_offline_scx,
3513
3514 .task_tick = task_tick_scx,
3515
3516 .switching_to = switching_to_scx,
3517 .switched_from = switched_from_scx,
3518 .switched_to = switched_to_scx,
3519 .reweight_task = reweight_task_scx,
3520 .prio_changed = prio_changed_scx,
3521
3522 .update_curr = update_curr_scx,
3523
3524 #ifdef CONFIG_UCLAMP_TASK
3525 .uclamp_enabled = 1,
3526 #endif
3527 };
3528
init_dsq(struct scx_dispatch_q * dsq,u64 dsq_id)3529 static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id)
3530 {
3531 memset(dsq, 0, sizeof(*dsq));
3532
3533 raw_spin_lock_init(&dsq->lock);
3534 INIT_LIST_HEAD(&dsq->list);
3535 dsq->id = dsq_id;
3536 }
3537
free_dsq_irq_workfn(struct irq_work * irq_work)3538 static void free_dsq_irq_workfn(struct irq_work *irq_work)
3539 {
3540 struct llist_node *to_free = llist_del_all(&dsqs_to_free);
3541 struct scx_dispatch_q *dsq, *tmp_dsq;
3542
3543 llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
3544 kfree_rcu(dsq, rcu);
3545 }
3546
3547 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);
3548
destroy_dsq(struct scx_sched * sch,u64 dsq_id)3549 static void destroy_dsq(struct scx_sched *sch, u64 dsq_id)
3550 {
3551 struct scx_dispatch_q *dsq;
3552 unsigned long flags;
3553
3554 rcu_read_lock();
3555
3556 dsq = find_user_dsq(sch, dsq_id);
3557 if (!dsq)
3558 goto out_unlock_rcu;
3559
3560 raw_spin_lock_irqsave(&dsq->lock, flags);
3561
3562 if (dsq->nr) {
3563 scx_error(sch, "attempting to destroy in-use dsq 0x%016llx (nr=%u)",
3564 dsq->id, dsq->nr);
3565 goto out_unlock_dsq;
3566 }
3567
3568 if (rhashtable_remove_fast(&sch->dsq_hash, &dsq->hash_node,
3569 dsq_hash_params))
3570 goto out_unlock_dsq;
3571
3572 /*
3573 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from
3574 * queueing more tasks. As this function can be called from anywhere,
3575 * freeing is bounced through an irq work to avoid nesting RCU
3576 * operations inside scheduler locks.
3577 */
3578 dsq->id = SCX_DSQ_INVALID;
3579 if (llist_add(&dsq->free_node, &dsqs_to_free))
3580 irq_work_queue(&free_dsq_irq_work);
3581
3582 out_unlock_dsq:
3583 raw_spin_unlock_irqrestore(&dsq->lock, flags);
3584 out_unlock_rcu:
3585 rcu_read_unlock();
3586 }
3587
3588 #ifdef CONFIG_EXT_GROUP_SCHED
scx_cgroup_exit(struct scx_sched * sch)3589 static void scx_cgroup_exit(struct scx_sched *sch)
3590 {
3591 struct cgroup_subsys_state *css;
3592
3593 scx_cgroup_enabled = false;
3594
3595 /*
3596 * scx_tg_on/offline() are excluded through cgroup_lock(). If we walk
3597 * cgroups and exit all the inited ones, all online cgroups are exited.
3598 */
3599 css_for_each_descendant_post(css, &root_task_group.css) {
3600 struct task_group *tg = css_tg(css);
3601
3602 if (!(tg->scx.flags & SCX_TG_INITED))
3603 continue;
3604 tg->scx.flags &= ~SCX_TG_INITED;
3605
3606 if (!sch->ops.cgroup_exit)
3607 continue;
3608
3609 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL,
3610 css->cgroup);
3611 }
3612 }
3613
scx_cgroup_init(struct scx_sched * sch)3614 static int scx_cgroup_init(struct scx_sched *sch)
3615 {
3616 struct cgroup_subsys_state *css;
3617 int ret;
3618
3619 /*
3620 * scx_tg_on/offline() are excluded through cgroup_lock(). If we walk
3621 * cgroups and init, all online cgroups are initialized.
3622 */
3623 css_for_each_descendant_pre(css, &root_task_group.css) {
3624 struct task_group *tg = css_tg(css);
3625 struct scx_cgroup_init_args args = {
3626 .weight = tg->scx.weight,
3627 .bw_period_us = tg->scx.bw_period_us,
3628 .bw_quota_us = tg->scx.bw_quota_us,
3629 .bw_burst_us = tg->scx.bw_burst_us,
3630 };
3631
3632 if ((tg->scx.flags &
3633 (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
3634 continue;
3635
3636 if (!sch->ops.cgroup_init) {
3637 tg->scx.flags |= SCX_TG_INITED;
3638 continue;
3639 }
3640
3641 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init, NULL,
3642 css->cgroup, &args);
3643 if (ret) {
3644 scx_error(sch, "ops.cgroup_init() failed (%d)", ret);
3645 return ret;
3646 }
3647 tg->scx.flags |= SCX_TG_INITED;
3648 }
3649
3650 WARN_ON_ONCE(scx_cgroup_enabled);
3651 scx_cgroup_enabled = true;
3652
3653 return 0;
3654 }
3655
3656 #else
scx_cgroup_exit(struct scx_sched * sch)3657 static void scx_cgroup_exit(struct scx_sched *sch) {}
scx_cgroup_init(struct scx_sched * sch)3658 static int scx_cgroup_init(struct scx_sched *sch) { return 0; }
3659 #endif
3660
3661
3662 /********************************************************************************
3663 * Sysfs interface and ops enable/disable.
3664 */
3665
3666 #define SCX_ATTR(_name) \
3667 static struct kobj_attribute scx_attr_##_name = { \
3668 .attr = { .name = __stringify(_name), .mode = 0444 }, \
3669 .show = scx_attr_##_name##_show, \
3670 }
3671
scx_attr_state_show(struct kobject * kobj,struct kobj_attribute * ka,char * buf)3672 static ssize_t scx_attr_state_show(struct kobject *kobj,
3673 struct kobj_attribute *ka, char *buf)
3674 {
3675 return sysfs_emit(buf, "%s\n", scx_enable_state_str[scx_enable_state()]);
3676 }
3677 SCX_ATTR(state);
3678
scx_attr_switch_all_show(struct kobject * kobj,struct kobj_attribute * ka,char * buf)3679 static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
3680 struct kobj_attribute *ka, char *buf)
3681 {
3682 return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all));
3683 }
3684 SCX_ATTR(switch_all);
3685
scx_attr_nr_rejected_show(struct kobject * kobj,struct kobj_attribute * ka,char * buf)3686 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
3687 struct kobj_attribute *ka, char *buf)
3688 {
3689 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
3690 }
3691 SCX_ATTR(nr_rejected);
3692
scx_attr_hotplug_seq_show(struct kobject * kobj,struct kobj_attribute * ka,char * buf)3693 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
3694 struct kobj_attribute *ka, char *buf)
3695 {
3696 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
3697 }
3698 SCX_ATTR(hotplug_seq);
3699
scx_attr_enable_seq_show(struct kobject * kobj,struct kobj_attribute * ka,char * buf)3700 static ssize_t scx_attr_enable_seq_show(struct kobject *kobj,
3701 struct kobj_attribute *ka, char *buf)
3702 {
3703 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq));
3704 }
3705 SCX_ATTR(enable_seq);
3706
3707 static struct attribute *scx_global_attrs[] = {
3708 &scx_attr_state.attr,
3709 &scx_attr_switch_all.attr,
3710 &scx_attr_nr_rejected.attr,
3711 &scx_attr_hotplug_seq.attr,
3712 &scx_attr_enable_seq.attr,
3713 NULL,
3714 };
3715
3716 static const struct attribute_group scx_global_attr_group = {
3717 .attrs = scx_global_attrs,
3718 };
3719
3720 static void free_exit_info(struct scx_exit_info *ei);
3721
scx_sched_free_rcu_work(struct work_struct * work)3722 static void scx_sched_free_rcu_work(struct work_struct *work)
3723 {
3724 struct rcu_work *rcu_work = to_rcu_work(work);
3725 struct scx_sched *sch = container_of(rcu_work, struct scx_sched, rcu_work);
3726 struct rhashtable_iter rht_iter;
3727 struct scx_dispatch_q *dsq;
3728 int node;
3729
3730 irq_work_sync(&sch->error_irq_work);
3731 kthread_destroy_worker(sch->helper);
3732
3733 free_percpu(sch->pcpu);
3734
3735 for_each_node_state(node, N_POSSIBLE)
3736 kfree(sch->global_dsqs[node]);
3737 kfree(sch->global_dsqs);
3738
3739 rhashtable_walk_enter(&sch->dsq_hash, &rht_iter);
3740 do {
3741 rhashtable_walk_start(&rht_iter);
3742
3743 while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq))
3744 destroy_dsq(sch, dsq->id);
3745
3746 rhashtable_walk_stop(&rht_iter);
3747 } while (dsq == ERR_PTR(-EAGAIN));
3748 rhashtable_walk_exit(&rht_iter);
3749
3750 rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL);
3751 free_exit_info(sch->exit_info);
3752 kfree(sch);
3753 }
3754
scx_kobj_release(struct kobject * kobj)3755 static void scx_kobj_release(struct kobject *kobj)
3756 {
3757 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
3758
3759 INIT_RCU_WORK(&sch->rcu_work, scx_sched_free_rcu_work);
3760 queue_rcu_work(system_unbound_wq, &sch->rcu_work);
3761 }
3762
scx_attr_ops_show(struct kobject * kobj,struct kobj_attribute * ka,char * buf)3763 static ssize_t scx_attr_ops_show(struct kobject *kobj,
3764 struct kobj_attribute *ka, char *buf)
3765 {
3766 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
3767
3768 return sysfs_emit(buf, "%s\n", sch->ops.name);
3769 }
3770 SCX_ATTR(ops);
3771
3772 #define scx_attr_event_show(buf, at, events, kind) ({ \
3773 sysfs_emit_at(buf, at, "%s %llu\n", #kind, (events)->kind); \
3774 })
3775
scx_attr_events_show(struct kobject * kobj,struct kobj_attribute * ka,char * buf)3776 static ssize_t scx_attr_events_show(struct kobject *kobj,
3777 struct kobj_attribute *ka, char *buf)
3778 {
3779 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
3780 struct scx_event_stats events;
3781 int at = 0;
3782
3783 scx_read_events(sch, &events);
3784 at += scx_attr_event_show(buf, at, &events, SCX_EV_SELECT_CPU_FALLBACK);
3785 at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
3786 at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_KEEP_LAST);
3787 at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_EXITING);
3788 at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
3789 at += scx_attr_event_show(buf, at, &events, SCX_EV_REFILL_SLICE_DFL);
3790 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DURATION);
3791 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DISPATCH);
3792 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_ACTIVATE);
3793 return at;
3794 }
3795 SCX_ATTR(events);
3796
3797 static struct attribute *scx_sched_attrs[] = {
3798 &scx_attr_ops.attr,
3799 &scx_attr_events.attr,
3800 NULL,
3801 };
3802 ATTRIBUTE_GROUPS(scx_sched);
3803
3804 static const struct kobj_type scx_ktype = {
3805 .release = scx_kobj_release,
3806 .sysfs_ops = &kobj_sysfs_ops,
3807 .default_groups = scx_sched_groups,
3808 };
3809
scx_uevent(const struct kobject * kobj,struct kobj_uevent_env * env)3810 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
3811 {
3812 const struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj);
3813
3814 return add_uevent_var(env, "SCXOPS=%s", sch->ops.name);
3815 }
3816
3817 static const struct kset_uevent_ops scx_uevent_ops = {
3818 .uevent = scx_uevent,
3819 };
3820
3821 /*
3822 * Used by sched_fork() and __setscheduler_prio() to pick the matching
3823 * sched_class. dl/rt are already handled.
3824 */
task_should_scx(int policy)3825 bool task_should_scx(int policy)
3826 {
3827 if (!scx_enabled() || unlikely(scx_enable_state() == SCX_DISABLING))
3828 return false;
3829 if (READ_ONCE(scx_switching_all))
3830 return true;
3831 return policy == SCHED_EXT;
3832 }
3833
scx_allow_ttwu_queue(const struct task_struct * p)3834 bool scx_allow_ttwu_queue(const struct task_struct *p)
3835 {
3836 struct scx_sched *sch;
3837
3838 if (!scx_enabled())
3839 return true;
3840
3841 sch = rcu_dereference_sched(scx_root);
3842 if (unlikely(!sch))
3843 return true;
3844
3845 if (sch->ops.flags & SCX_OPS_ALLOW_QUEUED_WAKEUP)
3846 return true;
3847
3848 if (unlikely(p->sched_class != &ext_sched_class))
3849 return true;
3850
3851 return false;
3852 }
3853
3854 /**
3855 * handle_lockup - sched_ext common lockup handler
3856 * @fmt: format string
3857 *
3858 * Called on system stall or lockup condition and initiates abort of sched_ext
3859 * if enabled, which may resolve the reported lockup.
3860 *
3861 * Returns %true if sched_ext is enabled and abort was initiated, which may
3862 * resolve the lockup. %false if sched_ext is not enabled or abort was already
3863 * initiated by someone else.
3864 */
handle_lockup(const char * fmt,...)3865 static __printf(1, 2) bool handle_lockup(const char *fmt, ...)
3866 {
3867 struct scx_sched *sch;
3868 va_list args;
3869 bool ret;
3870
3871 guard(rcu)();
3872
3873 sch = rcu_dereference(scx_root);
3874 if (unlikely(!sch))
3875 return false;
3876
3877 switch (scx_enable_state()) {
3878 case SCX_ENABLING:
3879 case SCX_ENABLED:
3880 va_start(args, fmt);
3881 ret = scx_verror(sch, fmt, args);
3882 va_end(args);
3883 return ret;
3884 default:
3885 return false;
3886 }
3887 }
3888
3889 /**
3890 * scx_rcu_cpu_stall - sched_ext RCU CPU stall handler
3891 *
3892 * While there are various reasons why RCU CPU stalls can occur on a system
3893 * that may not be caused by the current BPF scheduler, try kicking out the
3894 * current scheduler in an attempt to recover the system to a good state before
3895 * issuing panics.
3896 *
3897 * Returns %true if sched_ext is enabled and abort was initiated, which may
3898 * resolve the reported RCU stall. %false if sched_ext is not enabled or someone
3899 * else already initiated abort.
3900 */
scx_rcu_cpu_stall(void)3901 bool scx_rcu_cpu_stall(void)
3902 {
3903 return handle_lockup("RCU CPU stall detected!");
3904 }
3905
3906 /**
3907 * scx_softlockup - sched_ext softlockup handler
3908 * @dur_s: number of seconds of CPU stuck due to soft lockup
3909 *
3910 * On some multi-socket setups (e.g. 2x Intel 8480c), the BPF scheduler can
3911 * live-lock the system by making many CPUs target the same DSQ to the point
3912 * where soft-lockup detection triggers. This function is called from
3913 * soft-lockup watchdog when the triggering point is close and tries to unjam
3914 * the system and aborting the BPF scheduler.
3915 */
scx_softlockup(u32 dur_s)3916 void scx_softlockup(u32 dur_s)
3917 {
3918 if (!handle_lockup("soft lockup - CPU %d stuck for %us", smp_processor_id(), dur_s))
3919 return;
3920
3921 printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU %d stuck for %us, disabling BPF scheduler\n",
3922 smp_processor_id(), dur_s);
3923 }
3924
3925 /**
3926 * scx_hardlockup - sched_ext hardlockup handler
3927 *
3928 * A poorly behaving BPF scheduler can trigger hard lockup by e.g. putting
3929 * numerous affinitized tasks in a single queue and directing all CPUs at it.
3930 * Try kicking out the current scheduler in an attempt to recover the system to
3931 * a good state before taking more drastic actions.
3932 *
3933 * Returns %true if sched_ext is enabled and abort was initiated, which may
3934 * resolve the reported hardlockdup. %false if sched_ext is not enabled or
3935 * someone else already initiated abort.
3936 */
scx_hardlockup(int cpu)3937 bool scx_hardlockup(int cpu)
3938 {
3939 if (!handle_lockup("hard lockup - CPU %d", cpu))
3940 return false;
3941
3942 printk_deferred(KERN_ERR "sched_ext: Hard lockup - CPU %d, disabling BPF scheduler\n",
3943 cpu);
3944 return true;
3945 }
3946
bypass_lb_cpu(struct scx_sched * sch,struct rq * rq,struct cpumask * donee_mask,struct cpumask * resched_mask,u32 nr_donor_target,u32 nr_donee_target)3947 static u32 bypass_lb_cpu(struct scx_sched *sch, struct rq *rq,
3948 struct cpumask *donee_mask, struct cpumask *resched_mask,
3949 u32 nr_donor_target, u32 nr_donee_target)
3950 {
3951 struct scx_dispatch_q *donor_dsq = &rq->scx.bypass_dsq;
3952 struct task_struct *p, *n;
3953 struct scx_dsq_list_node cursor = INIT_DSQ_LIST_CURSOR(cursor, 0, 0);
3954 s32 delta = READ_ONCE(donor_dsq->nr) - nr_donor_target;
3955 u32 nr_balanced = 0, min_delta_us;
3956
3957 /*
3958 * All we want to guarantee is reasonable forward progress. No reason to
3959 * fine tune. Assuming every task on @donor_dsq runs their full slice,
3960 * consider offloading iff the total queued duration is over the
3961 * threshold.
3962 */
3963 min_delta_us = READ_ONCE(scx_bypass_lb_intv_us) / SCX_BYPASS_LB_MIN_DELTA_DIV;
3964 if (delta < DIV_ROUND_UP(min_delta_us, READ_ONCE(scx_slice_bypass_us)))
3965 return 0;
3966
3967 raw_spin_rq_lock_irq(rq);
3968 raw_spin_lock(&donor_dsq->lock);
3969 list_add(&cursor.node, &donor_dsq->list);
3970 resume:
3971 n = container_of(&cursor, struct task_struct, scx.dsq_list);
3972 n = nldsq_next_task(donor_dsq, n, false);
3973
3974 while ((p = n)) {
3975 struct rq *donee_rq;
3976 struct scx_dispatch_q *donee_dsq;
3977 int donee;
3978
3979 n = nldsq_next_task(donor_dsq, n, false);
3980
3981 if (donor_dsq->nr <= nr_donor_target)
3982 break;
3983
3984 if (cpumask_empty(donee_mask))
3985 break;
3986
3987 donee = cpumask_any_and_distribute(donee_mask, p->cpus_ptr);
3988 if (donee >= nr_cpu_ids)
3989 continue;
3990
3991 donee_rq = cpu_rq(donee);
3992 donee_dsq = &donee_rq->scx.bypass_dsq;
3993
3994 /*
3995 * $p's rq is not locked but $p's DSQ lock protects its
3996 * scheduling properties making this test safe.
3997 */
3998 if (!task_can_run_on_remote_rq(sch, p, donee_rq, false))
3999 continue;
4000
4001 /*
4002 * Moving $p from one non-local DSQ to another. The source rq
4003 * and DSQ are already locked. Do an abbreviated dequeue and
4004 * then perform enqueue without unlocking $donor_dsq.
4005 *
4006 * We don't want to drop and reacquire the lock on each
4007 * iteration as @donor_dsq can be very long and potentially
4008 * highly contended. Donee DSQs are less likely to be contended.
4009 * The nested locking is safe as only this LB moves tasks
4010 * between bypass DSQs.
4011 */
4012 dispatch_dequeue_locked(p, donor_dsq);
4013 dispatch_enqueue(sch, donee_dsq, p, SCX_ENQ_NESTED);
4014
4015 /*
4016 * $donee might have been idle and need to be woken up. No need
4017 * to be clever. Kick every CPU that receives tasks.
4018 */
4019 cpumask_set_cpu(donee, resched_mask);
4020
4021 if (READ_ONCE(donee_dsq->nr) >= nr_donee_target)
4022 cpumask_clear_cpu(donee, donee_mask);
4023
4024 nr_balanced++;
4025 if (!(nr_balanced % SCX_BYPASS_LB_BATCH) && n) {
4026 list_move_tail(&cursor.node, &n->scx.dsq_list.node);
4027 raw_spin_unlock(&donor_dsq->lock);
4028 raw_spin_rq_unlock_irq(rq);
4029 cpu_relax();
4030 raw_spin_rq_lock_irq(rq);
4031 raw_spin_lock(&donor_dsq->lock);
4032 goto resume;
4033 }
4034 }
4035
4036 list_del_init(&cursor.node);
4037 raw_spin_unlock(&donor_dsq->lock);
4038 raw_spin_rq_unlock_irq(rq);
4039
4040 return nr_balanced;
4041 }
4042
bypass_lb_node(struct scx_sched * sch,int node)4043 static void bypass_lb_node(struct scx_sched *sch, int node)
4044 {
4045 const struct cpumask *node_mask = cpumask_of_node(node);
4046 struct cpumask *donee_mask = scx_bypass_lb_donee_cpumask;
4047 struct cpumask *resched_mask = scx_bypass_lb_resched_cpumask;
4048 u32 nr_tasks = 0, nr_cpus = 0, nr_balanced = 0;
4049 u32 nr_target, nr_donor_target;
4050 u32 before_min = U32_MAX, before_max = 0;
4051 u32 after_min = U32_MAX, after_max = 0;
4052 int cpu;
4053
4054 /* count the target tasks and CPUs */
4055 for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
4056 u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr);
4057
4058 nr_tasks += nr;
4059 nr_cpus++;
4060
4061 before_min = min(nr, before_min);
4062 before_max = max(nr, before_max);
4063 }
4064
4065 if (!nr_cpus)
4066 return;
4067
4068 /*
4069 * We don't want CPUs to have more than $nr_donor_target tasks and
4070 * balancing to fill donee CPUs upto $nr_target. Once targets are
4071 * calculated, find the donee CPUs.
4072 */
4073 nr_target = DIV_ROUND_UP(nr_tasks, nr_cpus);
4074 nr_donor_target = DIV_ROUND_UP(nr_target * SCX_BYPASS_LB_DONOR_PCT, 100);
4075
4076 cpumask_clear(donee_mask);
4077 for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
4078 if (READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr) < nr_target)
4079 cpumask_set_cpu(cpu, donee_mask);
4080 }
4081
4082 /* iterate !donee CPUs and see if they should be offloaded */
4083 cpumask_clear(resched_mask);
4084 for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
4085 struct rq *rq = cpu_rq(cpu);
4086 struct scx_dispatch_q *donor_dsq = &rq->scx.bypass_dsq;
4087
4088 if (cpumask_empty(donee_mask))
4089 break;
4090 if (cpumask_test_cpu(cpu, donee_mask))
4091 continue;
4092 if (READ_ONCE(donor_dsq->nr) <= nr_donor_target)
4093 continue;
4094
4095 nr_balanced += bypass_lb_cpu(sch, rq, donee_mask, resched_mask,
4096 nr_donor_target, nr_target);
4097 }
4098
4099 for_each_cpu(cpu, resched_mask)
4100 resched_cpu(cpu);
4101
4102 for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
4103 u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr);
4104
4105 after_min = min(nr, after_min);
4106 after_max = max(nr, after_max);
4107
4108 }
4109
4110 trace_sched_ext_bypass_lb(node, nr_cpus, nr_tasks, nr_balanced,
4111 before_min, before_max, after_min, after_max);
4112 }
4113
4114 /*
4115 * In bypass mode, all tasks are put on the per-CPU bypass DSQs. If the machine
4116 * is over-saturated and the BPF scheduler skewed tasks into few CPUs, some
4117 * bypass DSQs can be overloaded. If there are enough tasks to saturate other
4118 * lightly loaded CPUs, such imbalance can lead to very high execution latency
4119 * on the overloaded CPUs and thus to hung tasks and RCU stalls. To avoid such
4120 * outcomes, a simple load balancing mechanism is implemented by the following
4121 * timer which runs periodically while bypass mode is in effect.
4122 */
scx_bypass_lb_timerfn(struct timer_list * timer)4123 static void scx_bypass_lb_timerfn(struct timer_list *timer)
4124 {
4125 struct scx_sched *sch;
4126 int node;
4127 u32 intv_us;
4128
4129 sch = rcu_dereference_all(scx_root);
4130 if (unlikely(!sch) || !READ_ONCE(scx_bypass_depth))
4131 return;
4132
4133 for_each_node_with_cpus(node)
4134 bypass_lb_node(sch, node);
4135
4136 intv_us = READ_ONCE(scx_bypass_lb_intv_us);
4137 if (intv_us)
4138 mod_timer(timer, jiffies + usecs_to_jiffies(intv_us));
4139 }
4140
4141 static DEFINE_TIMER(scx_bypass_lb_timer, scx_bypass_lb_timerfn);
4142
4143 /**
4144 * scx_bypass - [Un]bypass scx_ops and guarantee forward progress
4145 * @bypass: true for bypass, false for unbypass
4146 *
4147 * Bypassing guarantees that all runnable tasks make forward progress without
4148 * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
4149 * be held by tasks that the BPF scheduler is forgetting to run, which
4150 * unfortunately also excludes toggling the static branches.
4151 *
4152 * Let's work around by overriding a couple ops and modifying behaviors based on
4153 * the DISABLING state and then cycling the queued tasks through dequeue/enqueue
4154 * to force global FIFO scheduling.
4155 *
4156 * - ops.select_cpu() is ignored and the default select_cpu() is used.
4157 *
4158 * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
4159 * %SCX_OPS_ENQ_LAST is also ignored.
4160 *
4161 * - ops.dispatch() is ignored.
4162 *
4163 * - balance_one() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
4164 * can't be trusted. Whenever a tick triggers, the running task is rotated to
4165 * the tail of the queue with core_sched_at touched.
4166 *
4167 * - pick_next_task() suppresses zero slice warning.
4168 *
4169 * - scx_kick_cpu() is disabled to avoid irq_work malfunction during PM
4170 * operations.
4171 *
4172 * - scx_prio_less() reverts to the default core_sched_at order.
4173 */
scx_bypass(bool bypass)4174 static void scx_bypass(bool bypass)
4175 {
4176 static DEFINE_RAW_SPINLOCK(bypass_lock);
4177 static unsigned long bypass_timestamp;
4178 struct scx_sched *sch;
4179 unsigned long flags;
4180 int cpu;
4181
4182 raw_spin_lock_irqsave(&bypass_lock, flags);
4183 sch = rcu_dereference_bh(scx_root);
4184
4185 if (bypass) {
4186 u32 intv_us;
4187
4188 WRITE_ONCE(scx_bypass_depth, scx_bypass_depth + 1);
4189 WARN_ON_ONCE(scx_bypass_depth <= 0);
4190 if (scx_bypass_depth != 1)
4191 goto unlock;
4192 WRITE_ONCE(scx_slice_dfl, READ_ONCE(scx_slice_bypass_us) * NSEC_PER_USEC);
4193 bypass_timestamp = ktime_get_ns();
4194 if (sch)
4195 scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1);
4196
4197 intv_us = READ_ONCE(scx_bypass_lb_intv_us);
4198 if (intv_us && !timer_pending(&scx_bypass_lb_timer)) {
4199 scx_bypass_lb_timer.expires =
4200 jiffies + usecs_to_jiffies(intv_us);
4201 add_timer_global(&scx_bypass_lb_timer);
4202 }
4203 } else {
4204 WRITE_ONCE(scx_bypass_depth, scx_bypass_depth - 1);
4205 WARN_ON_ONCE(scx_bypass_depth < 0);
4206 if (scx_bypass_depth != 0)
4207 goto unlock;
4208 WRITE_ONCE(scx_slice_dfl, SCX_SLICE_DFL);
4209 if (sch)
4210 scx_add_event(sch, SCX_EV_BYPASS_DURATION,
4211 ktime_get_ns() - bypass_timestamp);
4212 }
4213
4214 /*
4215 * No task property is changing. We just need to make sure all currently
4216 * queued tasks are re-queued according to the new scx_rq_bypassing()
4217 * state. As an optimization, walk each rq's runnable_list instead of
4218 * the scx_tasks list.
4219 *
4220 * This function can't trust the scheduler and thus can't use
4221 * cpus_read_lock(). Walk all possible CPUs instead of online.
4222 */
4223 for_each_possible_cpu(cpu) {
4224 struct rq *rq = cpu_rq(cpu);
4225 struct task_struct *p, *n;
4226
4227 raw_spin_rq_lock(rq);
4228
4229 if (bypass) {
4230 WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
4231 rq->scx.flags |= SCX_RQ_BYPASSING;
4232 } else {
4233 WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING));
4234 rq->scx.flags &= ~SCX_RQ_BYPASSING;
4235 }
4236
4237 /*
4238 * We need to guarantee that no tasks are on the BPF scheduler
4239 * while bypassing. Either we see enabled or the enable path
4240 * sees scx_rq_bypassing() before moving tasks to SCX.
4241 */
4242 if (!scx_enabled()) {
4243 raw_spin_rq_unlock(rq);
4244 continue;
4245 }
4246
4247 /*
4248 * The use of list_for_each_entry_safe_reverse() is required
4249 * because each task is going to be removed from and added back
4250 * to the runnable_list during iteration. Because they're added
4251 * to the tail of the list, safe reverse iteration can still
4252 * visit all nodes.
4253 */
4254 list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
4255 scx.runnable_node) {
4256 /* cycling deq/enq is enough, see the function comment */
4257 scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_MOVE) {
4258 /* nothing */ ;
4259 }
4260 }
4261
4262 /* resched to restore ticks and idle state */
4263 if (cpu_online(cpu) || cpu == smp_processor_id())
4264 resched_curr(rq);
4265
4266 raw_spin_rq_unlock(rq);
4267 }
4268
4269 unlock:
4270 raw_spin_unlock_irqrestore(&bypass_lock, flags);
4271 }
4272
free_exit_info(struct scx_exit_info * ei)4273 static void free_exit_info(struct scx_exit_info *ei)
4274 {
4275 kvfree(ei->dump);
4276 kfree(ei->msg);
4277 kfree(ei->bt);
4278 kfree(ei);
4279 }
4280
alloc_exit_info(size_t exit_dump_len)4281 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
4282 {
4283 struct scx_exit_info *ei;
4284
4285 ei = kzalloc_obj(*ei);
4286 if (!ei)
4287 return NULL;
4288
4289 ei->bt = kzalloc_objs(ei->bt[0], SCX_EXIT_BT_LEN);
4290 ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
4291 ei->dump = kvzalloc(exit_dump_len, GFP_KERNEL);
4292
4293 if (!ei->bt || !ei->msg || !ei->dump) {
4294 free_exit_info(ei);
4295 return NULL;
4296 }
4297
4298 return ei;
4299 }
4300
scx_exit_reason(enum scx_exit_kind kind)4301 static const char *scx_exit_reason(enum scx_exit_kind kind)
4302 {
4303 switch (kind) {
4304 case SCX_EXIT_UNREG:
4305 return "unregistered from user space";
4306 case SCX_EXIT_UNREG_BPF:
4307 return "unregistered from BPF";
4308 case SCX_EXIT_UNREG_KERN:
4309 return "unregistered from the main kernel";
4310 case SCX_EXIT_SYSRQ:
4311 return "disabled by sysrq-S";
4312 case SCX_EXIT_ERROR:
4313 return "runtime error";
4314 case SCX_EXIT_ERROR_BPF:
4315 return "scx_bpf_error";
4316 case SCX_EXIT_ERROR_STALL:
4317 return "runnable task stall";
4318 default:
4319 return "<UNKNOWN>";
4320 }
4321 }
4322
free_kick_syncs(void)4323 static void free_kick_syncs(void)
4324 {
4325 int cpu;
4326
4327 for_each_possible_cpu(cpu) {
4328 struct scx_kick_syncs **ksyncs = per_cpu_ptr(&scx_kick_syncs, cpu);
4329 struct scx_kick_syncs *to_free;
4330
4331 to_free = rcu_replace_pointer(*ksyncs, NULL, true);
4332 if (to_free)
4333 kvfree_rcu(to_free, rcu);
4334 }
4335 }
4336
scx_disable_workfn(struct kthread_work * work)4337 static void scx_disable_workfn(struct kthread_work *work)
4338 {
4339 struct scx_sched *sch = container_of(work, struct scx_sched, disable_work);
4340 struct scx_exit_info *ei = sch->exit_info;
4341 struct scx_task_iter sti;
4342 struct task_struct *p;
4343 int kind, cpu;
4344
4345 kind = atomic_read(&sch->exit_kind);
4346 while (true) {
4347 if (kind == SCX_EXIT_DONE) /* already disabled? */
4348 return;
4349 WARN_ON_ONCE(kind == SCX_EXIT_NONE);
4350 if (atomic_try_cmpxchg(&sch->exit_kind, &kind, SCX_EXIT_DONE))
4351 break;
4352 }
4353 ei->kind = kind;
4354 ei->reason = scx_exit_reason(ei->kind);
4355
4356 /* guarantee forward progress by bypassing scx_ops */
4357 scx_bypass(true);
4358 WRITE_ONCE(scx_aborting, false);
4359
4360 switch (scx_set_enable_state(SCX_DISABLING)) {
4361 case SCX_DISABLING:
4362 WARN_ONCE(true, "sched_ext: duplicate disabling instance?");
4363 break;
4364 case SCX_DISABLED:
4365 pr_warn("sched_ext: ops error detected without ops (%s)\n",
4366 sch->exit_info->msg);
4367 WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING);
4368 goto done;
4369 default:
4370 break;
4371 }
4372
4373 /*
4374 * Here, every runnable task is guaranteed to make forward progress and
4375 * we can safely use blocking synchronization constructs. Actually
4376 * disable ops.
4377 */
4378 mutex_lock(&scx_enable_mutex);
4379
4380 static_branch_disable(&__scx_switched_all);
4381 WRITE_ONCE(scx_switching_all, false);
4382
4383 /*
4384 * Shut down cgroup support before tasks so that the cgroup attach path
4385 * doesn't race against scx_exit_task().
4386 */
4387 scx_cgroup_lock();
4388 scx_cgroup_exit(sch);
4389 scx_cgroup_unlock();
4390
4391 /*
4392 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones
4393 * must be switched out and exited synchronously.
4394 */
4395 percpu_down_write(&scx_fork_rwsem);
4396
4397 scx_init_task_enabled = false;
4398
4399 scx_task_iter_start(&sti);
4400 while ((p = scx_task_iter_next_locked(&sti))) {
4401 unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
4402 const struct sched_class *old_class = p->sched_class;
4403 const struct sched_class *new_class = scx_setscheduler_class(p);
4404
4405 update_rq_clock(task_rq(p));
4406
4407 if (old_class != new_class)
4408 queue_flags |= DEQUEUE_CLASS;
4409
4410 scoped_guard (sched_change, p, queue_flags) {
4411 p->sched_class = new_class;
4412 }
4413
4414 scx_exit_task(p);
4415 }
4416 scx_task_iter_stop(&sti);
4417 percpu_up_write(&scx_fork_rwsem);
4418
4419 /*
4420 * Invalidate all the rq clocks to prevent getting outdated
4421 * rq clocks from a previous scx scheduler.
4422 */
4423 for_each_possible_cpu(cpu) {
4424 struct rq *rq = cpu_rq(cpu);
4425 scx_rq_clock_invalidate(rq);
4426 }
4427
4428 /* no task is on scx, turn off all the switches and flush in-progress calls */
4429 static_branch_disable(&__scx_enabled);
4430 bitmap_zero(sch->has_op, SCX_OPI_END);
4431 scx_idle_disable();
4432 synchronize_rcu();
4433
4434 if (ei->kind >= SCX_EXIT_ERROR) {
4435 pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
4436 sch->ops.name, ei->reason);
4437
4438 if (ei->msg[0] != '\0')
4439 pr_err("sched_ext: %s: %s\n", sch->ops.name, ei->msg);
4440 #ifdef CONFIG_STACKTRACE
4441 stack_trace_print(ei->bt, ei->bt_len, 2);
4442 #endif
4443 } else {
4444 pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
4445 sch->ops.name, ei->reason);
4446 }
4447
4448 if (sch->ops.exit)
4449 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, exit, NULL, ei);
4450
4451 cancel_delayed_work_sync(&scx_watchdog_work);
4452
4453 /*
4454 * scx_root clearing must be inside cpus_read_lock(). See
4455 * handle_hotplug().
4456 */
4457 cpus_read_lock();
4458 RCU_INIT_POINTER(scx_root, NULL);
4459 cpus_read_unlock();
4460
4461 /*
4462 * Delete the kobject from the hierarchy synchronously. Otherwise, sysfs
4463 * could observe an object of the same name still in the hierarchy when
4464 * the next scheduler is loaded.
4465 */
4466 kobject_del(&sch->kobj);
4467
4468 free_percpu(scx_dsp_ctx);
4469 scx_dsp_ctx = NULL;
4470 scx_dsp_max_batch = 0;
4471 free_kick_syncs();
4472
4473 if (scx_bypassed_for_enable) {
4474 scx_bypassed_for_enable = false;
4475 scx_bypass(false);
4476 }
4477
4478 mutex_unlock(&scx_enable_mutex);
4479
4480 WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING);
4481 done:
4482 scx_bypass(false);
4483 }
4484
4485 /*
4486 * Claim the exit on @sch. The caller must ensure that the helper kthread work
4487 * is kicked before the current task can be preempted. Once exit_kind is
4488 * claimed, scx_error() can no longer trigger, so if the current task gets
4489 * preempted and the BPF scheduler fails to schedule it back, the helper work
4490 * will never be kicked and the whole system can wedge.
4491 */
scx_claim_exit(struct scx_sched * sch,enum scx_exit_kind kind)4492 static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind)
4493 {
4494 int none = SCX_EXIT_NONE;
4495
4496 lockdep_assert_preemption_disabled();
4497
4498 if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind))
4499 return false;
4500
4501 /*
4502 * Some CPUs may be trapped in the dispatch paths. Set the aborting
4503 * flag to break potential live-lock scenarios, ensuring we can
4504 * successfully reach scx_bypass().
4505 */
4506 WRITE_ONCE(scx_aborting, true);
4507 return true;
4508 }
4509
scx_disable(enum scx_exit_kind kind)4510 static void scx_disable(enum scx_exit_kind kind)
4511 {
4512 struct scx_sched *sch;
4513
4514 if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
4515 kind = SCX_EXIT_ERROR;
4516
4517 rcu_read_lock();
4518 sch = rcu_dereference(scx_root);
4519 if (sch) {
4520 guard(preempt)();
4521 scx_claim_exit(sch, kind);
4522 kthread_queue_work(sch->helper, &sch->disable_work);
4523 }
4524 rcu_read_unlock();
4525 }
4526
dump_newline(struct seq_buf * s)4527 static void dump_newline(struct seq_buf *s)
4528 {
4529 trace_sched_ext_dump("");
4530
4531 /* @s may be zero sized and seq_buf triggers WARN if so */
4532 if (s->size)
4533 seq_buf_putc(s, '\n');
4534 }
4535
dump_line(struct seq_buf * s,const char * fmt,...)4536 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
4537 {
4538 va_list args;
4539
4540 #ifdef CONFIG_TRACEPOINTS
4541 if (trace_sched_ext_dump_enabled()) {
4542 /* protected by scx_dump_state()::dump_lock */
4543 static char line_buf[SCX_EXIT_MSG_LEN];
4544
4545 va_start(args, fmt);
4546 vscnprintf(line_buf, sizeof(line_buf), fmt, args);
4547 va_end(args);
4548
4549 trace_sched_ext_dump(line_buf);
4550 }
4551 #endif
4552 /* @s may be zero sized and seq_buf triggers WARN if so */
4553 if (s->size) {
4554 va_start(args, fmt);
4555 seq_buf_vprintf(s, fmt, args);
4556 va_end(args);
4557
4558 seq_buf_putc(s, '\n');
4559 }
4560 }
4561
dump_stack_trace(struct seq_buf * s,const char * prefix,const unsigned long * bt,unsigned int len)4562 static void dump_stack_trace(struct seq_buf *s, const char *prefix,
4563 const unsigned long *bt, unsigned int len)
4564 {
4565 unsigned int i;
4566
4567 for (i = 0; i < len; i++)
4568 dump_line(s, "%s%pS", prefix, (void *)bt[i]);
4569 }
4570
ops_dump_init(struct seq_buf * s,const char * prefix)4571 static void ops_dump_init(struct seq_buf *s, const char *prefix)
4572 {
4573 struct scx_dump_data *dd = &scx_dump_data;
4574
4575 lockdep_assert_irqs_disabled();
4576
4577 dd->cpu = smp_processor_id(); /* allow scx_bpf_dump() */
4578 dd->first = true;
4579 dd->cursor = 0;
4580 dd->s = s;
4581 dd->prefix = prefix;
4582 }
4583
ops_dump_flush(void)4584 static void ops_dump_flush(void)
4585 {
4586 struct scx_dump_data *dd = &scx_dump_data;
4587 char *line = dd->buf.line;
4588
4589 if (!dd->cursor)
4590 return;
4591
4592 /*
4593 * There's something to flush and this is the first line. Insert a blank
4594 * line to distinguish ops dump.
4595 */
4596 if (dd->first) {
4597 dump_newline(dd->s);
4598 dd->first = false;
4599 }
4600
4601 /*
4602 * There may be multiple lines in $line. Scan and emit each line
4603 * separately.
4604 */
4605 while (true) {
4606 char *end = line;
4607 char c;
4608
4609 while (*end != '\n' && *end != '\0')
4610 end++;
4611
4612 /*
4613 * If $line overflowed, it may not have newline at the end.
4614 * Always emit with a newline.
4615 */
4616 c = *end;
4617 *end = '\0';
4618 dump_line(dd->s, "%s%s", dd->prefix, line);
4619 if (c == '\0')
4620 break;
4621
4622 /* move to the next line */
4623 end++;
4624 if (*end == '\0')
4625 break;
4626 line = end;
4627 }
4628
4629 dd->cursor = 0;
4630 }
4631
ops_dump_exit(void)4632 static void ops_dump_exit(void)
4633 {
4634 ops_dump_flush();
4635 scx_dump_data.cpu = -1;
4636 }
4637
scx_dump_task(struct seq_buf * s,struct scx_dump_ctx * dctx,struct task_struct * p,char marker)4638 static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
4639 struct task_struct *p, char marker)
4640 {
4641 static unsigned long bt[SCX_EXIT_BT_LEN];
4642 struct scx_sched *sch = scx_root;
4643 char dsq_id_buf[19] = "(n/a)";
4644 unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
4645 unsigned int bt_len = 0;
4646
4647 if (p->scx.dsq)
4648 scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx",
4649 (unsigned long long)p->scx.dsq->id);
4650
4651 dump_newline(s);
4652 dump_line(s, " %c%c %s[%d] %+ldms",
4653 marker, task_state_to_char(p), p->comm, p->pid,
4654 jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
4655 dump_line(s, " scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
4656 scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
4657 p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
4658 ops_state >> SCX_OPSS_QSEQ_SHIFT);
4659 dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s",
4660 p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf);
4661 dump_line(s, " dsq_vtime=%llu slice=%llu weight=%u",
4662 p->scx.dsq_vtime, p->scx.slice, p->scx.weight);
4663 dump_line(s, " cpus=%*pb no_mig=%u", cpumask_pr_args(p->cpus_ptr),
4664 p->migration_disabled);
4665
4666 if (SCX_HAS_OP(sch, dump_task)) {
4667 ops_dump_init(s, " ");
4668 SCX_CALL_OP(sch, SCX_KF_REST, dump_task, NULL, dctx, p);
4669 ops_dump_exit();
4670 }
4671
4672 #ifdef CONFIG_STACKTRACE
4673 bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1);
4674 #endif
4675 if (bt_len) {
4676 dump_newline(s);
4677 dump_stack_trace(s, " ", bt, bt_len);
4678 }
4679 }
4680
scx_dump_state(struct scx_exit_info * ei,size_t dump_len)4681 static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
4682 {
4683 static DEFINE_SPINLOCK(dump_lock);
4684 static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
4685 struct scx_sched *sch = scx_root;
4686 struct scx_dump_ctx dctx = {
4687 .kind = ei->kind,
4688 .exit_code = ei->exit_code,
4689 .reason = ei->reason,
4690 .at_ns = ktime_get_ns(),
4691 .at_jiffies = jiffies,
4692 };
4693 struct seq_buf s;
4694 struct scx_event_stats events;
4695 unsigned long flags;
4696 char *buf;
4697 int cpu;
4698
4699 spin_lock_irqsave(&dump_lock, flags);
4700
4701 seq_buf_init(&s, ei->dump, dump_len);
4702
4703 if (ei->kind == SCX_EXIT_NONE) {
4704 dump_line(&s, "Debug dump triggered by %s", ei->reason);
4705 } else {
4706 dump_line(&s, "%s[%d] triggered exit kind %d:",
4707 current->comm, current->pid, ei->kind);
4708 dump_line(&s, " %s (%s)", ei->reason, ei->msg);
4709 dump_newline(&s);
4710 dump_line(&s, "Backtrace:");
4711 dump_stack_trace(&s, " ", ei->bt, ei->bt_len);
4712 }
4713
4714 if (SCX_HAS_OP(sch, dump)) {
4715 ops_dump_init(&s, "");
4716 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, dump, NULL, &dctx);
4717 ops_dump_exit();
4718 }
4719
4720 dump_newline(&s);
4721 dump_line(&s, "CPU states");
4722 dump_line(&s, "----------");
4723
4724 for_each_possible_cpu(cpu) {
4725 struct rq *rq = cpu_rq(cpu);
4726 struct rq_flags rf;
4727 struct task_struct *p;
4728 struct seq_buf ns;
4729 size_t avail, used;
4730 bool idle;
4731
4732 rq_lock_irqsave(rq, &rf);
4733
4734 idle = list_empty(&rq->scx.runnable_list) &&
4735 rq->curr->sched_class == &idle_sched_class;
4736
4737 if (idle && !SCX_HAS_OP(sch, dump_cpu))
4738 goto next;
4739
4740 /*
4741 * We don't yet know whether ops.dump_cpu() will produce output
4742 * and we may want to skip the default CPU dump if it doesn't.
4743 * Use a nested seq_buf to generate the standard dump so that we
4744 * can decide whether to commit later.
4745 */
4746 avail = seq_buf_get_buf(&s, &buf);
4747 seq_buf_init(&ns, buf, avail);
4748
4749 dump_newline(&ns);
4750 dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu ksync=%lu",
4751 cpu, rq->scx.nr_running, rq->scx.flags,
4752 rq->scx.cpu_released, rq->scx.ops_qseq,
4753 rq->scx.kick_sync);
4754 dump_line(&ns, " curr=%s[%d] class=%ps",
4755 rq->curr->comm, rq->curr->pid,
4756 rq->curr->sched_class);
4757 if (!cpumask_empty(rq->scx.cpus_to_kick))
4758 dump_line(&ns, " cpus_to_kick : %*pb",
4759 cpumask_pr_args(rq->scx.cpus_to_kick));
4760 if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
4761 dump_line(&ns, " idle_to_kick : %*pb",
4762 cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
4763 if (!cpumask_empty(rq->scx.cpus_to_preempt))
4764 dump_line(&ns, " cpus_to_preempt: %*pb",
4765 cpumask_pr_args(rq->scx.cpus_to_preempt));
4766 if (!cpumask_empty(rq->scx.cpus_to_wait))
4767 dump_line(&ns, " cpus_to_wait : %*pb",
4768 cpumask_pr_args(rq->scx.cpus_to_wait));
4769 if (!cpumask_empty(rq->scx.cpus_to_sync))
4770 dump_line(&ns, " cpus_to_sync : %*pb",
4771 cpumask_pr_args(rq->scx.cpus_to_sync));
4772
4773 used = seq_buf_used(&ns);
4774 if (SCX_HAS_OP(sch, dump_cpu)) {
4775 ops_dump_init(&ns, " ");
4776 SCX_CALL_OP(sch, SCX_KF_REST, dump_cpu, NULL,
4777 &dctx, cpu, idle);
4778 ops_dump_exit();
4779 }
4780
4781 /*
4782 * If idle && nothing generated by ops.dump_cpu(), there's
4783 * nothing interesting. Skip.
4784 */
4785 if (idle && used == seq_buf_used(&ns))
4786 goto next;
4787
4788 /*
4789 * $s may already have overflowed when $ns was created. If so,
4790 * calling commit on it will trigger BUG.
4791 */
4792 if (avail) {
4793 seq_buf_commit(&s, seq_buf_used(&ns));
4794 if (seq_buf_has_overflowed(&ns))
4795 seq_buf_set_overflow(&s);
4796 }
4797
4798 if (rq->curr->sched_class == &ext_sched_class)
4799 scx_dump_task(&s, &dctx, rq->curr, '*');
4800
4801 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
4802 scx_dump_task(&s, &dctx, p, ' ');
4803 next:
4804 rq_unlock_irqrestore(rq, &rf);
4805 }
4806
4807 dump_newline(&s);
4808 dump_line(&s, "Event counters");
4809 dump_line(&s, "--------------");
4810
4811 scx_read_events(sch, &events);
4812 scx_dump_event(s, &events, SCX_EV_SELECT_CPU_FALLBACK);
4813 scx_dump_event(s, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
4814 scx_dump_event(s, &events, SCX_EV_DISPATCH_KEEP_LAST);
4815 scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_EXITING);
4816 scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
4817 scx_dump_event(s, &events, SCX_EV_REFILL_SLICE_DFL);
4818 scx_dump_event(s, &events, SCX_EV_BYPASS_DURATION);
4819 scx_dump_event(s, &events, SCX_EV_BYPASS_DISPATCH);
4820 scx_dump_event(s, &events, SCX_EV_BYPASS_ACTIVATE);
4821
4822 if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
4823 memcpy(ei->dump + dump_len - sizeof(trunc_marker),
4824 trunc_marker, sizeof(trunc_marker));
4825
4826 spin_unlock_irqrestore(&dump_lock, flags);
4827 }
4828
scx_error_irq_workfn(struct irq_work * irq_work)4829 static void scx_error_irq_workfn(struct irq_work *irq_work)
4830 {
4831 struct scx_sched *sch = container_of(irq_work, struct scx_sched, error_irq_work);
4832 struct scx_exit_info *ei = sch->exit_info;
4833
4834 if (ei->kind >= SCX_EXIT_ERROR)
4835 scx_dump_state(ei, sch->ops.exit_dump_len);
4836
4837 kthread_queue_work(sch->helper, &sch->disable_work);
4838 }
4839
scx_vexit(struct scx_sched * sch,enum scx_exit_kind kind,s64 exit_code,const char * fmt,va_list args)4840 static bool scx_vexit(struct scx_sched *sch,
4841 enum scx_exit_kind kind, s64 exit_code,
4842 const char *fmt, va_list args)
4843 {
4844 struct scx_exit_info *ei = sch->exit_info;
4845
4846 guard(preempt)();
4847
4848 if (!scx_claim_exit(sch, kind))
4849 return false;
4850
4851 ei->exit_code = exit_code;
4852 #ifdef CONFIG_STACKTRACE
4853 if (kind >= SCX_EXIT_ERROR)
4854 ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);
4855 #endif
4856 vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
4857
4858 /*
4859 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again
4860 * in scx_disable_workfn().
4861 */
4862 ei->kind = kind;
4863 ei->reason = scx_exit_reason(ei->kind);
4864
4865 irq_work_queue(&sch->error_irq_work);
4866 return true;
4867 }
4868
alloc_kick_syncs(void)4869 static int alloc_kick_syncs(void)
4870 {
4871 int cpu;
4872
4873 /*
4874 * Allocate per-CPU arrays sized by nr_cpu_ids. Use kvzalloc as size
4875 * can exceed percpu allocator limits on large machines.
4876 */
4877 for_each_possible_cpu(cpu) {
4878 struct scx_kick_syncs **ksyncs = per_cpu_ptr(&scx_kick_syncs, cpu);
4879 struct scx_kick_syncs *new_ksyncs;
4880
4881 WARN_ON_ONCE(rcu_access_pointer(*ksyncs));
4882
4883 new_ksyncs = kvzalloc_node(struct_size(new_ksyncs, syncs, nr_cpu_ids),
4884 GFP_KERNEL, cpu_to_node(cpu));
4885 if (!new_ksyncs) {
4886 free_kick_syncs();
4887 return -ENOMEM;
4888 }
4889
4890 rcu_assign_pointer(*ksyncs, new_ksyncs);
4891 }
4892
4893 return 0;
4894 }
4895
scx_alloc_and_add_sched(struct sched_ext_ops * ops)4896 static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
4897 {
4898 struct scx_sched *sch;
4899 int node, ret;
4900
4901 sch = kzalloc_obj(*sch);
4902 if (!sch)
4903 return ERR_PTR(-ENOMEM);
4904
4905 sch->exit_info = alloc_exit_info(ops->exit_dump_len);
4906 if (!sch->exit_info) {
4907 ret = -ENOMEM;
4908 goto err_free_sch;
4909 }
4910
4911 ret = rhashtable_init(&sch->dsq_hash, &dsq_hash_params);
4912 if (ret < 0)
4913 goto err_free_ei;
4914
4915 sch->global_dsqs = kzalloc_objs(sch->global_dsqs[0], nr_node_ids);
4916 if (!sch->global_dsqs) {
4917 ret = -ENOMEM;
4918 goto err_free_hash;
4919 }
4920
4921 for_each_node_state(node, N_POSSIBLE) {
4922 struct scx_dispatch_q *dsq;
4923
4924 dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node);
4925 if (!dsq) {
4926 ret = -ENOMEM;
4927 goto err_free_gdsqs;
4928 }
4929
4930 init_dsq(dsq, SCX_DSQ_GLOBAL);
4931 sch->global_dsqs[node] = dsq;
4932 }
4933
4934 sch->pcpu = alloc_percpu(struct scx_sched_pcpu);
4935 if (!sch->pcpu) {
4936 ret = -ENOMEM;
4937 goto err_free_gdsqs;
4938 }
4939
4940 sch->helper = kthread_run_worker(0, "sched_ext_helper");
4941 if (IS_ERR(sch->helper)) {
4942 ret = PTR_ERR(sch->helper);
4943 goto err_free_pcpu;
4944 }
4945
4946 sched_set_fifo(sch->helper->task);
4947
4948 atomic_set(&sch->exit_kind, SCX_EXIT_NONE);
4949 init_irq_work(&sch->error_irq_work, scx_error_irq_workfn);
4950 kthread_init_work(&sch->disable_work, scx_disable_workfn);
4951 sch->ops = *ops;
4952 ops->priv = sch;
4953
4954 sch->kobj.kset = scx_kset;
4955 ret = kobject_init_and_add(&sch->kobj, &scx_ktype, NULL, "root");
4956 if (ret < 0)
4957 goto err_stop_helper;
4958
4959 return sch;
4960
4961 err_stop_helper:
4962 kthread_destroy_worker(sch->helper);
4963 err_free_pcpu:
4964 free_percpu(sch->pcpu);
4965 err_free_gdsqs:
4966 for_each_node_state(node, N_POSSIBLE)
4967 kfree(sch->global_dsqs[node]);
4968 kfree(sch->global_dsqs);
4969 err_free_hash:
4970 rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL);
4971 err_free_ei:
4972 free_exit_info(sch->exit_info);
4973 err_free_sch:
4974 kfree(sch);
4975 return ERR_PTR(ret);
4976 }
4977
check_hotplug_seq(struct scx_sched * sch,const struct sched_ext_ops * ops)4978 static int check_hotplug_seq(struct scx_sched *sch,
4979 const struct sched_ext_ops *ops)
4980 {
4981 unsigned long long global_hotplug_seq;
4982
4983 /*
4984 * If a hotplug event has occurred between when a scheduler was
4985 * initialized, and when we were able to attach, exit and notify user
4986 * space about it.
4987 */
4988 if (ops->hotplug_seq) {
4989 global_hotplug_seq = atomic_long_read(&scx_hotplug_seq);
4990 if (ops->hotplug_seq != global_hotplug_seq) {
4991 scx_exit(sch, SCX_EXIT_UNREG_KERN,
4992 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
4993 "expected hotplug seq %llu did not match actual %llu",
4994 ops->hotplug_seq, global_hotplug_seq);
4995 return -EBUSY;
4996 }
4997 }
4998
4999 return 0;
5000 }
5001
validate_ops(struct scx_sched * sch,const struct sched_ext_ops * ops)5002 static int validate_ops(struct scx_sched *sch, const struct sched_ext_ops *ops)
5003 {
5004 /*
5005 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the
5006 * ops.enqueue() callback isn't implemented.
5007 */
5008 if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {
5009 scx_error(sch, "SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
5010 return -EINVAL;
5011 }
5012
5013 /*
5014 * SCX_OPS_BUILTIN_IDLE_PER_NODE requires built-in CPU idle
5015 * selection policy to be enabled.
5016 */
5017 if ((ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE) &&
5018 (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))) {
5019 scx_error(sch, "SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled");
5020 return -EINVAL;
5021 }
5022
5023 if (ops->flags & SCX_OPS_HAS_CGROUP_WEIGHT)
5024 pr_warn("SCX_OPS_HAS_CGROUP_WEIGHT is deprecated and a noop\n");
5025
5026 if (ops->cpu_acquire || ops->cpu_release)
5027 pr_warn("ops->cpu_acquire/release() are deprecated, use sched_switch TP instead\n");
5028
5029 return 0;
5030 }
5031
5032 /*
5033 * scx_enable() is offloaded to a dedicated system-wide RT kthread to avoid
5034 * starvation. During the READY -> ENABLED task switching loop, the calling
5035 * thread's sched_class gets switched from fair to ext. As fair has higher
5036 * priority than ext, the calling thread can be indefinitely starved under
5037 * fair-class saturation, leading to a system hang.
5038 */
5039 struct scx_enable_cmd {
5040 struct kthread_work work;
5041 struct sched_ext_ops *ops;
5042 int ret;
5043 };
5044
scx_enable_workfn(struct kthread_work * work)5045 static void scx_enable_workfn(struct kthread_work *work)
5046 {
5047 struct scx_enable_cmd *cmd =
5048 container_of(work, struct scx_enable_cmd, work);
5049 struct sched_ext_ops *ops = cmd->ops;
5050 struct scx_sched *sch;
5051 struct scx_task_iter sti;
5052 struct task_struct *p;
5053 unsigned long timeout;
5054 int i, cpu, ret;
5055
5056 mutex_lock(&scx_enable_mutex);
5057
5058 if (scx_enable_state() != SCX_DISABLED) {
5059 ret = -EBUSY;
5060 goto err_unlock;
5061 }
5062
5063 ret = alloc_kick_syncs();
5064 if (ret)
5065 goto err_unlock;
5066
5067 sch = scx_alloc_and_add_sched(ops);
5068 if (IS_ERR(sch)) {
5069 ret = PTR_ERR(sch);
5070 goto err_free_ksyncs;
5071 }
5072
5073 /*
5074 * Transition to ENABLING and clear exit info to arm the disable path.
5075 * Failure triggers full disabling from here on.
5076 */
5077 WARN_ON_ONCE(scx_set_enable_state(SCX_ENABLING) != SCX_DISABLED);
5078 WARN_ON_ONCE(scx_root);
5079 if (WARN_ON_ONCE(READ_ONCE(scx_aborting)))
5080 WRITE_ONCE(scx_aborting, false);
5081
5082 atomic_long_set(&scx_nr_rejected, 0);
5083
5084 for_each_possible_cpu(cpu)
5085 cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;
5086
5087 /*
5088 * Keep CPUs stable during enable so that the BPF scheduler can track
5089 * online CPUs by watching ->on/offline_cpu() after ->init().
5090 */
5091 cpus_read_lock();
5092
5093 /*
5094 * Make the scheduler instance visible. Must be inside cpus_read_lock().
5095 * See handle_hotplug().
5096 */
5097 rcu_assign_pointer(scx_root, sch);
5098
5099 scx_idle_enable(ops);
5100
5101 if (sch->ops.init) {
5102 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init, NULL);
5103 if (ret) {
5104 ret = ops_sanitize_err(sch, "init", ret);
5105 cpus_read_unlock();
5106 scx_error(sch, "ops.init() failed (%d)", ret);
5107 goto err_disable;
5108 }
5109 sch->exit_info->flags |= SCX_EFLAG_INITIALIZED;
5110 }
5111
5112 for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
5113 if (((void (**)(void))ops)[i])
5114 set_bit(i, sch->has_op);
5115
5116 ret = check_hotplug_seq(sch, ops);
5117 if (ret) {
5118 cpus_read_unlock();
5119 goto err_disable;
5120 }
5121 scx_idle_update_selcpu_topology(ops);
5122
5123 cpus_read_unlock();
5124
5125 ret = validate_ops(sch, ops);
5126 if (ret)
5127 goto err_disable;
5128
5129 WARN_ON_ONCE(scx_dsp_ctx);
5130 scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
5131 scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
5132 scx_dsp_max_batch),
5133 __alignof__(struct scx_dsp_ctx));
5134 if (!scx_dsp_ctx) {
5135 ret = -ENOMEM;
5136 goto err_disable;
5137 }
5138
5139 if (ops->timeout_ms)
5140 timeout = msecs_to_jiffies(ops->timeout_ms);
5141 else
5142 timeout = SCX_WATCHDOG_MAX_TIMEOUT;
5143
5144 WRITE_ONCE(scx_watchdog_timeout, timeout);
5145 WRITE_ONCE(scx_watchdog_timestamp, jiffies);
5146 queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
5147 READ_ONCE(scx_watchdog_timeout) / 2);
5148
5149 /*
5150 * Once __scx_enabled is set, %current can be switched to SCX anytime.
5151 * This can lead to stalls as some BPF schedulers (e.g. userspace
5152 * scheduling) may not function correctly before all tasks are switched.
5153 * Init in bypass mode to guarantee forward progress.
5154 */
5155 scx_bypass(true);
5156 scx_bypassed_for_enable = true;
5157
5158 for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
5159 if (((void (**)(void))ops)[i])
5160 set_bit(i, sch->has_op);
5161
5162 if (sch->ops.cpu_acquire || sch->ops.cpu_release)
5163 sch->ops.flags |= SCX_OPS_HAS_CPU_PREEMPT;
5164
5165 /*
5166 * Lock out forks, cgroup on/offlining and moves before opening the
5167 * floodgate so that they don't wander into the operations prematurely.
5168 */
5169 percpu_down_write(&scx_fork_rwsem);
5170
5171 WARN_ON_ONCE(scx_init_task_enabled);
5172 scx_init_task_enabled = true;
5173
5174 /*
5175 * Enable ops for every task. Fork is excluded by scx_fork_rwsem
5176 * preventing new tasks from being added. No need to exclude tasks
5177 * leaving as sched_ext_free() can handle both prepped and enabled
5178 * tasks. Prep all tasks first and then enable them with preemption
5179 * disabled.
5180 *
5181 * All cgroups should be initialized before scx_init_task() so that the
5182 * BPF scheduler can reliably track each task's cgroup membership from
5183 * scx_init_task(). Lock out cgroup on/offlining and task migrations
5184 * while tasks are being initialized so that scx_cgroup_can_attach()
5185 * never sees uninitialized tasks.
5186 */
5187 scx_cgroup_lock();
5188 ret = scx_cgroup_init(sch);
5189 if (ret)
5190 goto err_disable_unlock_all;
5191
5192 scx_task_iter_start(&sti);
5193 while ((p = scx_task_iter_next_locked(&sti))) {
5194 /*
5195 * @p may already be dead, have lost all its usages counts and
5196 * be waiting for RCU grace period before being freed. @p can't
5197 * be initialized for SCX in such cases and should be ignored.
5198 */
5199 if (!tryget_task_struct(p))
5200 continue;
5201
5202 scx_task_iter_unlock(&sti);
5203
5204 ret = scx_init_task(p, task_group(p), false);
5205 if (ret) {
5206 put_task_struct(p);
5207 scx_task_iter_stop(&sti);
5208 scx_error(sch, "ops.init_task() failed (%d) for %s[%d]",
5209 ret, p->comm, p->pid);
5210 goto err_disable_unlock_all;
5211 }
5212
5213 scx_set_task_state(p, SCX_TASK_READY);
5214
5215 put_task_struct(p);
5216 }
5217 scx_task_iter_stop(&sti);
5218 scx_cgroup_unlock();
5219 percpu_up_write(&scx_fork_rwsem);
5220
5221 /*
5222 * All tasks are READY. It's safe to turn on scx_enabled() and switch
5223 * all eligible tasks.
5224 */
5225 WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
5226 static_branch_enable(&__scx_enabled);
5227
5228 /*
5229 * We're fully committed and can't fail. The task READY -> ENABLED
5230 * transitions here are synchronized against sched_ext_free() through
5231 * scx_tasks_lock.
5232 */
5233 percpu_down_write(&scx_fork_rwsem);
5234 scx_task_iter_start(&sti);
5235 while ((p = scx_task_iter_next_locked(&sti))) {
5236 unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
5237 const struct sched_class *old_class = p->sched_class;
5238 const struct sched_class *new_class = scx_setscheduler_class(p);
5239
5240 if (scx_get_task_state(p) != SCX_TASK_READY)
5241 continue;
5242
5243 if (old_class != new_class)
5244 queue_flags |= DEQUEUE_CLASS;
5245
5246 scoped_guard (sched_change, p, queue_flags) {
5247 p->scx.slice = READ_ONCE(scx_slice_dfl);
5248 p->sched_class = new_class;
5249 }
5250 }
5251 scx_task_iter_stop(&sti);
5252 percpu_up_write(&scx_fork_rwsem);
5253
5254 scx_bypassed_for_enable = false;
5255 scx_bypass(false);
5256
5257 if (!scx_tryset_enable_state(SCX_ENABLED, SCX_ENABLING)) {
5258 WARN_ON_ONCE(atomic_read(&sch->exit_kind) == SCX_EXIT_NONE);
5259 goto err_disable;
5260 }
5261
5262 if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
5263 static_branch_enable(&__scx_switched_all);
5264
5265 pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
5266 sch->ops.name, scx_switched_all() ? "" : " (partial)");
5267 kobject_uevent(&sch->kobj, KOBJ_ADD);
5268 mutex_unlock(&scx_enable_mutex);
5269
5270 atomic_long_inc(&scx_enable_seq);
5271
5272 cmd->ret = 0;
5273 return;
5274
5275 err_free_ksyncs:
5276 free_kick_syncs();
5277 err_unlock:
5278 mutex_unlock(&scx_enable_mutex);
5279 cmd->ret = ret;
5280 return;
5281
5282 err_disable_unlock_all:
5283 scx_cgroup_unlock();
5284 percpu_up_write(&scx_fork_rwsem);
5285 /* we'll soon enter disable path, keep bypass on */
5286 err_disable:
5287 mutex_unlock(&scx_enable_mutex);
5288 /*
5289 * Returning an error code here would not pass all the error information
5290 * to userspace. Record errno using scx_error() for cases scx_error()
5291 * wasn't already invoked and exit indicating success so that the error
5292 * is notified through ops.exit() with all the details.
5293 *
5294 * Flush scx_disable_work to ensure that error is reported before init
5295 * completion. sch's base reference will be put by bpf_scx_unreg().
5296 */
5297 scx_error(sch, "scx_enable() failed (%d)", ret);
5298 kthread_flush_work(&sch->disable_work);
5299 cmd->ret = 0;
5300 }
5301
scx_enable(struct sched_ext_ops * ops,struct bpf_link * link)5302 static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
5303 {
5304 static struct kthread_worker *helper;
5305 static DEFINE_MUTEX(helper_mutex);
5306 struct scx_enable_cmd cmd;
5307
5308 if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
5309 cpu_possible_mask)) {
5310 pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
5311 return -EINVAL;
5312 }
5313
5314 if (!READ_ONCE(helper)) {
5315 mutex_lock(&helper_mutex);
5316 if (!helper) {
5317 struct kthread_worker *w =
5318 kthread_run_worker(0, "scx_enable_helper");
5319 if (IS_ERR_OR_NULL(w)) {
5320 mutex_unlock(&helper_mutex);
5321 return -ENOMEM;
5322 }
5323 sched_set_fifo(w->task);
5324 WRITE_ONCE(helper, w);
5325 }
5326 mutex_unlock(&helper_mutex);
5327 }
5328
5329 kthread_init_work(&cmd.work, scx_enable_workfn);
5330 cmd.ops = ops;
5331
5332 kthread_queue_work(READ_ONCE(helper), &cmd.work);
5333 kthread_flush_work(&cmd.work);
5334 return cmd.ret;
5335 }
5336
5337
5338 /********************************************************************************
5339 * bpf_struct_ops plumbing.
5340 */
5341 #include <linux/bpf_verifier.h>
5342 #include <linux/bpf.h>
5343 #include <linux/btf.h>
5344
5345 static const struct btf_type *task_struct_type;
5346
bpf_scx_is_valid_access(int off,int size,enum bpf_access_type type,const struct bpf_prog * prog,struct bpf_insn_access_aux * info)5347 static bool bpf_scx_is_valid_access(int off, int size,
5348 enum bpf_access_type type,
5349 const struct bpf_prog *prog,
5350 struct bpf_insn_access_aux *info)
5351 {
5352 if (type != BPF_READ)
5353 return false;
5354 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
5355 return false;
5356 if (off % size != 0)
5357 return false;
5358
5359 return btf_ctx_access(off, size, type, prog, info);
5360 }
5361
bpf_scx_btf_struct_access(struct bpf_verifier_log * log,const struct bpf_reg_state * reg,int off,int size)5362 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
5363 const struct bpf_reg_state *reg, int off,
5364 int size)
5365 {
5366 const struct btf_type *t;
5367
5368 t = btf_type_by_id(reg->btf, reg->btf_id);
5369 if (t == task_struct_type) {
5370 if (off >= offsetof(struct task_struct, scx.slice) &&
5371 off + size <= offsetofend(struct task_struct, scx.slice))
5372 return SCALAR_VALUE;
5373 if (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
5374 off + size <= offsetofend(struct task_struct, scx.dsq_vtime))
5375 return SCALAR_VALUE;
5376 if (off >= offsetof(struct task_struct, scx.disallow) &&
5377 off + size <= offsetofend(struct task_struct, scx.disallow))
5378 return SCALAR_VALUE;
5379 }
5380
5381 return -EACCES;
5382 }
5383
5384 static const struct bpf_verifier_ops bpf_scx_verifier_ops = {
5385 .get_func_proto = bpf_base_func_proto,
5386 .is_valid_access = bpf_scx_is_valid_access,
5387 .btf_struct_access = bpf_scx_btf_struct_access,
5388 };
5389
bpf_scx_init_member(const struct btf_type * t,const struct btf_member * member,void * kdata,const void * udata)5390 static int bpf_scx_init_member(const struct btf_type *t,
5391 const struct btf_member *member,
5392 void *kdata, const void *udata)
5393 {
5394 const struct sched_ext_ops *uops = udata;
5395 struct sched_ext_ops *ops = kdata;
5396 u32 moff = __btf_member_bit_offset(t, member) / 8;
5397 int ret;
5398
5399 switch (moff) {
5400 case offsetof(struct sched_ext_ops, dispatch_max_batch):
5401 if (*(u32 *)(udata + moff) > INT_MAX)
5402 return -E2BIG;
5403 ops->dispatch_max_batch = *(u32 *)(udata + moff);
5404 return 1;
5405 case offsetof(struct sched_ext_ops, flags):
5406 if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS)
5407 return -EINVAL;
5408 ops->flags = *(u64 *)(udata + moff);
5409 return 1;
5410 case offsetof(struct sched_ext_ops, name):
5411 ret = bpf_obj_name_cpy(ops->name, uops->name,
5412 sizeof(ops->name));
5413 if (ret < 0)
5414 return ret;
5415 if (ret == 0)
5416 return -EINVAL;
5417 return 1;
5418 case offsetof(struct sched_ext_ops, timeout_ms):
5419 if (msecs_to_jiffies(*(u32 *)(udata + moff)) >
5420 SCX_WATCHDOG_MAX_TIMEOUT)
5421 return -E2BIG;
5422 ops->timeout_ms = *(u32 *)(udata + moff);
5423 return 1;
5424 case offsetof(struct sched_ext_ops, exit_dump_len):
5425 ops->exit_dump_len =
5426 *(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN;
5427 return 1;
5428 case offsetof(struct sched_ext_ops, hotplug_seq):
5429 ops->hotplug_seq = *(u64 *)(udata + moff);
5430 return 1;
5431 }
5432
5433 return 0;
5434 }
5435
bpf_scx_check_member(const struct btf_type * t,const struct btf_member * member,const struct bpf_prog * prog)5436 static int bpf_scx_check_member(const struct btf_type *t,
5437 const struct btf_member *member,
5438 const struct bpf_prog *prog)
5439 {
5440 u32 moff = __btf_member_bit_offset(t, member) / 8;
5441
5442 switch (moff) {
5443 case offsetof(struct sched_ext_ops, init_task):
5444 #ifdef CONFIG_EXT_GROUP_SCHED
5445 case offsetof(struct sched_ext_ops, cgroup_init):
5446 case offsetof(struct sched_ext_ops, cgroup_exit):
5447 case offsetof(struct sched_ext_ops, cgroup_prep_move):
5448 #endif
5449 case offsetof(struct sched_ext_ops, cpu_online):
5450 case offsetof(struct sched_ext_ops, cpu_offline):
5451 case offsetof(struct sched_ext_ops, init):
5452 case offsetof(struct sched_ext_ops, exit):
5453 break;
5454 default:
5455 if (prog->sleepable)
5456 return -EINVAL;
5457 }
5458
5459 return 0;
5460 }
5461
bpf_scx_reg(void * kdata,struct bpf_link * link)5462 static int bpf_scx_reg(void *kdata, struct bpf_link *link)
5463 {
5464 return scx_enable(kdata, link);
5465 }
5466
bpf_scx_unreg(void * kdata,struct bpf_link * link)5467 static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
5468 {
5469 struct sched_ext_ops *ops = kdata;
5470 struct scx_sched *sch = ops->priv;
5471
5472 scx_disable(SCX_EXIT_UNREG);
5473 kthread_flush_work(&sch->disable_work);
5474 kobject_put(&sch->kobj);
5475 }
5476
bpf_scx_init(struct btf * btf)5477 static int bpf_scx_init(struct btf *btf)
5478 {
5479 task_struct_type = btf_type_by_id(btf, btf_tracing_ids[BTF_TRACING_TYPE_TASK]);
5480
5481 return 0;
5482 }
5483
bpf_scx_update(void * kdata,void * old_kdata,struct bpf_link * link)5484 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)
5485 {
5486 /*
5487 * sched_ext does not support updating the actively-loaded BPF
5488 * scheduler, as registering a BPF scheduler can always fail if the
5489 * scheduler returns an error code for e.g. ops.init(), ops.init_task(),
5490 * etc. Similarly, we can always race with unregistration happening
5491 * elsewhere, such as with sysrq.
5492 */
5493 return -EOPNOTSUPP;
5494 }
5495
bpf_scx_validate(void * kdata)5496 static int bpf_scx_validate(void *kdata)
5497 {
5498 return 0;
5499 }
5500
sched_ext_ops__select_cpu(struct task_struct * p,s32 prev_cpu,u64 wake_flags)5501 static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
sched_ext_ops__enqueue(struct task_struct * p,u64 enq_flags)5502 static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {}
sched_ext_ops__dequeue(struct task_struct * p,u64 enq_flags)5503 static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {}
sched_ext_ops__dispatch(s32 prev_cpu,struct task_struct * prev__nullable)5504 static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *prev__nullable) {}
sched_ext_ops__tick(struct task_struct * p)5505 static void sched_ext_ops__tick(struct task_struct *p) {}
sched_ext_ops__runnable(struct task_struct * p,u64 enq_flags)5506 static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {}
sched_ext_ops__running(struct task_struct * p)5507 static void sched_ext_ops__running(struct task_struct *p) {}
sched_ext_ops__stopping(struct task_struct * p,bool runnable)5508 static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {}
sched_ext_ops__quiescent(struct task_struct * p,u64 deq_flags)5509 static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {}
sched_ext_ops__yield(struct task_struct * from,struct task_struct * to__nullable)5510 static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to__nullable) { return false; }
sched_ext_ops__core_sched_before(struct task_struct * a,struct task_struct * b)5511 static bool sched_ext_ops__core_sched_before(struct task_struct *a, struct task_struct *b) { return false; }
sched_ext_ops__set_weight(struct task_struct * p,u32 weight)5512 static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {}
sched_ext_ops__set_cpumask(struct task_struct * p,const struct cpumask * mask)5513 static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {}
sched_ext_ops__update_idle(s32 cpu,bool idle)5514 static void sched_ext_ops__update_idle(s32 cpu, bool idle) {}
sched_ext_ops__cpu_acquire(s32 cpu,struct scx_cpu_acquire_args * args)5515 static void sched_ext_ops__cpu_acquire(s32 cpu, struct scx_cpu_acquire_args *args) {}
sched_ext_ops__cpu_release(s32 cpu,struct scx_cpu_release_args * args)5516 static void sched_ext_ops__cpu_release(s32 cpu, struct scx_cpu_release_args *args) {}
sched_ext_ops__init_task(struct task_struct * p,struct scx_init_task_args * args)5517 static s32 sched_ext_ops__init_task(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
sched_ext_ops__exit_task(struct task_struct * p,struct scx_exit_task_args * args)5518 static void sched_ext_ops__exit_task(struct task_struct *p, struct scx_exit_task_args *args) {}
sched_ext_ops__enable(struct task_struct * p)5519 static void sched_ext_ops__enable(struct task_struct *p) {}
sched_ext_ops__disable(struct task_struct * p)5520 static void sched_ext_ops__disable(struct task_struct *p) {}
5521 #ifdef CONFIG_EXT_GROUP_SCHED
sched_ext_ops__cgroup_init(struct cgroup * cgrp,struct scx_cgroup_init_args * args)5522 static s32 sched_ext_ops__cgroup_init(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
sched_ext_ops__cgroup_exit(struct cgroup * cgrp)5523 static void sched_ext_ops__cgroup_exit(struct cgroup *cgrp) {}
sched_ext_ops__cgroup_prep_move(struct task_struct * p,struct cgroup * from,struct cgroup * to)5524 static s32 sched_ext_ops__cgroup_prep_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
sched_ext_ops__cgroup_move(struct task_struct * p,struct cgroup * from,struct cgroup * to)5525 static void sched_ext_ops__cgroup_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
sched_ext_ops__cgroup_cancel_move(struct task_struct * p,struct cgroup * from,struct cgroup * to)5526 static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
sched_ext_ops__cgroup_set_weight(struct cgroup * cgrp,u32 weight)5527 static void sched_ext_ops__cgroup_set_weight(struct cgroup *cgrp, u32 weight) {}
sched_ext_ops__cgroup_set_bandwidth(struct cgroup * cgrp,u64 period_us,u64 quota_us,u64 burst_us)5528 static void sched_ext_ops__cgroup_set_bandwidth(struct cgroup *cgrp, u64 period_us, u64 quota_us, u64 burst_us) {}
sched_ext_ops__cgroup_set_idle(struct cgroup * cgrp,bool idle)5529 static void sched_ext_ops__cgroup_set_idle(struct cgroup *cgrp, bool idle) {}
5530 #endif
sched_ext_ops__cpu_online(s32 cpu)5531 static void sched_ext_ops__cpu_online(s32 cpu) {}
sched_ext_ops__cpu_offline(s32 cpu)5532 static void sched_ext_ops__cpu_offline(s32 cpu) {}
sched_ext_ops__init(void)5533 static s32 sched_ext_ops__init(void) { return -EINVAL; }
sched_ext_ops__exit(struct scx_exit_info * info)5534 static void sched_ext_ops__exit(struct scx_exit_info *info) {}
sched_ext_ops__dump(struct scx_dump_ctx * ctx)5535 static void sched_ext_ops__dump(struct scx_dump_ctx *ctx) {}
sched_ext_ops__dump_cpu(struct scx_dump_ctx * ctx,s32 cpu,bool idle)5536 static void sched_ext_ops__dump_cpu(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
sched_ext_ops__dump_task(struct scx_dump_ctx * ctx,struct task_struct * p)5537 static void sched_ext_ops__dump_task(struct scx_dump_ctx *ctx, struct task_struct *p) {}
5538
5539 static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
5540 .select_cpu = sched_ext_ops__select_cpu,
5541 .enqueue = sched_ext_ops__enqueue,
5542 .dequeue = sched_ext_ops__dequeue,
5543 .dispatch = sched_ext_ops__dispatch,
5544 .tick = sched_ext_ops__tick,
5545 .runnable = sched_ext_ops__runnable,
5546 .running = sched_ext_ops__running,
5547 .stopping = sched_ext_ops__stopping,
5548 .quiescent = sched_ext_ops__quiescent,
5549 .yield = sched_ext_ops__yield,
5550 .core_sched_before = sched_ext_ops__core_sched_before,
5551 .set_weight = sched_ext_ops__set_weight,
5552 .set_cpumask = sched_ext_ops__set_cpumask,
5553 .update_idle = sched_ext_ops__update_idle,
5554 .cpu_acquire = sched_ext_ops__cpu_acquire,
5555 .cpu_release = sched_ext_ops__cpu_release,
5556 .init_task = sched_ext_ops__init_task,
5557 .exit_task = sched_ext_ops__exit_task,
5558 .enable = sched_ext_ops__enable,
5559 .disable = sched_ext_ops__disable,
5560 #ifdef CONFIG_EXT_GROUP_SCHED
5561 .cgroup_init = sched_ext_ops__cgroup_init,
5562 .cgroup_exit = sched_ext_ops__cgroup_exit,
5563 .cgroup_prep_move = sched_ext_ops__cgroup_prep_move,
5564 .cgroup_move = sched_ext_ops__cgroup_move,
5565 .cgroup_cancel_move = sched_ext_ops__cgroup_cancel_move,
5566 .cgroup_set_weight = sched_ext_ops__cgroup_set_weight,
5567 .cgroup_set_bandwidth = sched_ext_ops__cgroup_set_bandwidth,
5568 .cgroup_set_idle = sched_ext_ops__cgroup_set_idle,
5569 #endif
5570 .cpu_online = sched_ext_ops__cpu_online,
5571 .cpu_offline = sched_ext_ops__cpu_offline,
5572 .init = sched_ext_ops__init,
5573 .exit = sched_ext_ops__exit,
5574 .dump = sched_ext_ops__dump,
5575 .dump_cpu = sched_ext_ops__dump_cpu,
5576 .dump_task = sched_ext_ops__dump_task,
5577 };
5578
5579 static struct bpf_struct_ops bpf_sched_ext_ops = {
5580 .verifier_ops = &bpf_scx_verifier_ops,
5581 .reg = bpf_scx_reg,
5582 .unreg = bpf_scx_unreg,
5583 .check_member = bpf_scx_check_member,
5584 .init_member = bpf_scx_init_member,
5585 .init = bpf_scx_init,
5586 .update = bpf_scx_update,
5587 .validate = bpf_scx_validate,
5588 .name = "sched_ext_ops",
5589 .owner = THIS_MODULE,
5590 .cfi_stubs = &__bpf_ops_sched_ext_ops
5591 };
5592
5593
5594 /********************************************************************************
5595 * System integration and init.
5596 */
5597
sysrq_handle_sched_ext_reset(u8 key)5598 static void sysrq_handle_sched_ext_reset(u8 key)
5599 {
5600 scx_disable(SCX_EXIT_SYSRQ);
5601 }
5602
5603 static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
5604 .handler = sysrq_handle_sched_ext_reset,
5605 .help_msg = "reset-sched-ext(S)",
5606 .action_msg = "Disable sched_ext and revert all tasks to CFS",
5607 .enable_mask = SYSRQ_ENABLE_RTNICE,
5608 };
5609
sysrq_handle_sched_ext_dump(u8 key)5610 static void sysrq_handle_sched_ext_dump(u8 key)
5611 {
5612 struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" };
5613
5614 if (scx_enabled())
5615 scx_dump_state(&ei, 0);
5616 }
5617
5618 static const struct sysrq_key_op sysrq_sched_ext_dump_op = {
5619 .handler = sysrq_handle_sched_ext_dump,
5620 .help_msg = "dump-sched-ext(D)",
5621 .action_msg = "Trigger sched_ext debug dump",
5622 .enable_mask = SYSRQ_ENABLE_RTNICE,
5623 };
5624
can_skip_idle_kick(struct rq * rq)5625 static bool can_skip_idle_kick(struct rq *rq)
5626 {
5627 lockdep_assert_rq_held(rq);
5628
5629 /*
5630 * We can skip idle kicking if @rq is going to go through at least one
5631 * full SCX scheduling cycle before going idle. Just checking whether
5632 * curr is not idle is insufficient because we could be racing
5633 * balance_one() trying to pull the next task from a remote rq, which
5634 * may fail, and @rq may become idle afterwards.
5635 *
5636 * The race window is small and we don't and can't guarantee that @rq is
5637 * only kicked while idle anyway. Skip only when sure.
5638 */
5639 return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
5640 }
5641
kick_one_cpu(s32 cpu,struct rq * this_rq,unsigned long * ksyncs)5642 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *ksyncs)
5643 {
5644 struct rq *rq = cpu_rq(cpu);
5645 struct scx_rq *this_scx = &this_rq->scx;
5646 const struct sched_class *cur_class;
5647 bool should_wait = false;
5648 unsigned long flags;
5649
5650 raw_spin_rq_lock_irqsave(rq, flags);
5651 cur_class = rq->curr->sched_class;
5652
5653 /*
5654 * During CPU hotplug, a CPU may depend on kicking itself to make
5655 * forward progress. Allow kicking self regardless of online state. If
5656 * @cpu is running a higher class task, we have no control over @cpu.
5657 * Skip kicking.
5658 */
5659 if ((cpu_online(cpu) || cpu == cpu_of(this_rq)) &&
5660 !sched_class_above(cur_class, &ext_sched_class)) {
5661 if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
5662 if (cur_class == &ext_sched_class)
5663 rq->curr->scx.slice = 0;
5664 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
5665 }
5666
5667 if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
5668 if (cur_class == &ext_sched_class) {
5669 cpumask_set_cpu(cpu, this_scx->cpus_to_sync);
5670 ksyncs[cpu] = rq->scx.kick_sync;
5671 should_wait = true;
5672 }
5673 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
5674 }
5675
5676 resched_curr(rq);
5677 } else {
5678 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
5679 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
5680 }
5681
5682 raw_spin_rq_unlock_irqrestore(rq, flags);
5683
5684 return should_wait;
5685 }
5686
kick_one_cpu_if_idle(s32 cpu,struct rq * this_rq)5687 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
5688 {
5689 struct rq *rq = cpu_rq(cpu);
5690 unsigned long flags;
5691
5692 raw_spin_rq_lock_irqsave(rq, flags);
5693
5694 if (!can_skip_idle_kick(rq) &&
5695 (cpu_online(cpu) || cpu == cpu_of(this_rq)))
5696 resched_curr(rq);
5697
5698 raw_spin_rq_unlock_irqrestore(rq, flags);
5699 }
5700
kick_cpus_irq_workfn(struct irq_work * irq_work)5701 static void kick_cpus_irq_workfn(struct irq_work *irq_work)
5702 {
5703 struct rq *this_rq = this_rq();
5704 struct scx_rq *this_scx = &this_rq->scx;
5705 struct scx_kick_syncs __rcu *ksyncs_pcpu = __this_cpu_read(scx_kick_syncs);
5706 bool should_wait = false;
5707 unsigned long *ksyncs;
5708 s32 cpu;
5709
5710 if (unlikely(!ksyncs_pcpu)) {
5711 pr_warn_once("kick_cpus_irq_workfn() called with NULL scx_kick_syncs");
5712 return;
5713 }
5714
5715 ksyncs = rcu_dereference_bh(ksyncs_pcpu)->syncs;
5716
5717 for_each_cpu(cpu, this_scx->cpus_to_kick) {
5718 should_wait |= kick_one_cpu(cpu, this_rq, ksyncs);
5719 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
5720 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
5721 }
5722
5723 for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
5724 kick_one_cpu_if_idle(cpu, this_rq);
5725 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
5726 }
5727
5728 /*
5729 * Can't wait in hardirq — kick_sync can't advance, deadlocking if
5730 * CPUs wait for each other. Defer to kick_sync_wait_bal_cb().
5731 */
5732 if (should_wait) {
5733 raw_spin_rq_lock(this_rq);
5734 this_scx->kick_sync_pending = true;
5735 resched_curr(this_rq);
5736 raw_spin_rq_unlock(this_rq);
5737 }
5738 }
5739
5740 /**
5741 * print_scx_info - print out sched_ext scheduler state
5742 * @log_lvl: the log level to use when printing
5743 * @p: target task
5744 *
5745 * If a sched_ext scheduler is enabled, print the name and state of the
5746 * scheduler. If @p is on sched_ext, print further information about the task.
5747 *
5748 * This function can be safely called on any task as long as the task_struct
5749 * itself is accessible. While safe, this function isn't synchronized and may
5750 * print out mixups or garbages of limited length.
5751 */
print_scx_info(const char * log_lvl,struct task_struct * p)5752 void print_scx_info(const char *log_lvl, struct task_struct *p)
5753 {
5754 struct scx_sched *sch = scx_root;
5755 enum scx_enable_state state = scx_enable_state();
5756 const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
5757 char runnable_at_buf[22] = "?";
5758 struct sched_class *class;
5759 unsigned long runnable_at;
5760
5761 if (state == SCX_DISABLED)
5762 return;
5763
5764 /*
5765 * Carefully check if the task was running on sched_ext, and then
5766 * carefully copy the time it's been runnable, and its state.
5767 */
5768 if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
5769 class != &ext_sched_class) {
5770 printk("%sSched_ext: %s (%s%s)", log_lvl, sch->ops.name,
5771 scx_enable_state_str[state], all);
5772 return;
5773 }
5774
5775 if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
5776 sizeof(runnable_at)))
5777 scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
5778 jiffies_delta_msecs(runnable_at, jiffies));
5779
5780 /* print everything onto one line to conserve console space */
5781 printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
5782 log_lvl, sch->ops.name, scx_enable_state_str[state], all,
5783 runnable_at_buf);
5784 }
5785
scx_pm_handler(struct notifier_block * nb,unsigned long event,void * ptr)5786 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
5787 {
5788 /*
5789 * SCX schedulers often have userspace components which are sometimes
5790 * involved in critial scheduling paths. PM operations involve freezing
5791 * userspace which can lead to scheduling misbehaviors including stalls.
5792 * Let's bypass while PM operations are in progress.
5793 */
5794 switch (event) {
5795 case PM_HIBERNATION_PREPARE:
5796 case PM_SUSPEND_PREPARE:
5797 case PM_RESTORE_PREPARE:
5798 scx_bypass(true);
5799 break;
5800 case PM_POST_HIBERNATION:
5801 case PM_POST_SUSPEND:
5802 case PM_POST_RESTORE:
5803 scx_bypass(false);
5804 break;
5805 }
5806
5807 return NOTIFY_OK;
5808 }
5809
5810 static struct notifier_block scx_pm_notifier = {
5811 .notifier_call = scx_pm_handler,
5812 };
5813
init_sched_ext_class(void)5814 void __init init_sched_ext_class(void)
5815 {
5816 s32 cpu, v;
5817
5818 /*
5819 * The following is to prevent the compiler from optimizing out the enum
5820 * definitions so that BPF scheduler implementations can use them
5821 * through the generated vmlinux.h.
5822 */
5823 WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
5824 SCX_TG_ONLINE);
5825
5826 scx_idle_init_masks();
5827
5828 for_each_possible_cpu(cpu) {
5829 struct rq *rq = cpu_rq(cpu);
5830 int n = cpu_to_node(cpu);
5831
5832 init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
5833 init_dsq(&rq->scx.bypass_dsq, SCX_DSQ_BYPASS);
5834 INIT_LIST_HEAD(&rq->scx.runnable_list);
5835 INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
5836
5837 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick, GFP_KERNEL, n));
5838 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n));
5839 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n));
5840 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n));
5841 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_sync, GFP_KERNEL, n));
5842 rq->scx.deferred_irq_work = IRQ_WORK_INIT_HARD(deferred_irq_workfn);
5843 rq->scx.kick_cpus_irq_work = IRQ_WORK_INIT_HARD(kick_cpus_irq_workfn);
5844
5845 if (cpu_online(cpu))
5846 cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
5847 }
5848
5849 register_sysrq_key('S', &sysrq_sched_ext_reset_op);
5850 register_sysrq_key('D', &sysrq_sched_ext_dump_op);
5851 INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
5852 }
5853
5854
5855 /********************************************************************************
5856 * Helpers that can be called from the BPF scheduler.
5857 */
scx_dsq_insert_preamble(struct scx_sched * sch,struct task_struct * p,u64 enq_flags)5858 static bool scx_dsq_insert_preamble(struct scx_sched *sch, struct task_struct *p,
5859 u64 enq_flags)
5860 {
5861 if (!scx_kf_allowed(sch, SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
5862 return false;
5863
5864 lockdep_assert_irqs_disabled();
5865
5866 if (unlikely(!p)) {
5867 scx_error(sch, "called with NULL task");
5868 return false;
5869 }
5870
5871 if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
5872 scx_error(sch, "invalid enq_flags 0x%llx", enq_flags);
5873 return false;
5874 }
5875
5876 return true;
5877 }
5878
scx_dsq_insert_commit(struct scx_sched * sch,struct task_struct * p,u64 dsq_id,u64 enq_flags)5879 static void scx_dsq_insert_commit(struct scx_sched *sch, struct task_struct *p,
5880 u64 dsq_id, u64 enq_flags)
5881 {
5882 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
5883 struct task_struct *ddsp_task;
5884
5885 ddsp_task = __this_cpu_read(direct_dispatch_task);
5886 if (ddsp_task) {
5887 mark_direct_dispatch(sch, ddsp_task, p, dsq_id, enq_flags);
5888 return;
5889 }
5890
5891 if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
5892 scx_error(sch, "dispatch buffer overflow");
5893 return;
5894 }
5895
5896 dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
5897 .task = p,
5898 .qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
5899 .dsq_id = dsq_id,
5900 .enq_flags = enq_flags,
5901 };
5902 }
5903
5904 __bpf_kfunc_start_defs();
5905
5906 /**
5907 * scx_bpf_dsq_insert - Insert a task into the FIFO queue of a DSQ
5908 * @p: task_struct to insert
5909 * @dsq_id: DSQ to insert into
5910 * @slice: duration @p can run for in nsecs, 0 to keep the current value
5911 * @enq_flags: SCX_ENQ_*
5912 *
5913 * Insert @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe to
5914 * call this function spuriously. Can be called from ops.enqueue(),
5915 * ops.select_cpu(), and ops.dispatch().
5916 *
5917 * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
5918 * and @p must match the task being enqueued.
5919 *
5920 * When called from ops.select_cpu(), @enq_flags and @dsp_id are stored, and @p
5921 * will be directly inserted into the corresponding dispatch queue after
5922 * ops.select_cpu() returns. If @p is inserted into SCX_DSQ_LOCAL, it will be
5923 * inserted into the local DSQ of the CPU returned by ops.select_cpu().
5924 * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
5925 * task is inserted.
5926 *
5927 * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
5928 * and this function can be called upto ops.dispatch_max_batch times to insert
5929 * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of the
5930 * remaining slots. scx_bpf_dsq_move_to_local() flushes the batch and resets the
5931 * counter.
5932 *
5933 * This function doesn't have any locking restrictions and may be called under
5934 * BPF locks (in the future when BPF introduces more flexible locking).
5935 *
5936 * @p is allowed to run for @slice. The scheduling path is triggered on slice
5937 * exhaustion. If zero, the current residual slice is maintained. If
5938 * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
5939 * scx_bpf_kick_cpu() to trigger scheduling.
5940 *
5941 * Returns %true on successful insertion, %false on failure. On the root
5942 * scheduler, %false return triggers scheduler abort and the caller doesn't need
5943 * to check the return value.
5944 */
scx_bpf_dsq_insert___v2(struct task_struct * p,u64 dsq_id,u64 slice,u64 enq_flags)5945 __bpf_kfunc bool scx_bpf_dsq_insert___v2(struct task_struct *p, u64 dsq_id,
5946 u64 slice, u64 enq_flags)
5947 {
5948 struct scx_sched *sch;
5949
5950 guard(rcu)();
5951 sch = rcu_dereference(scx_root);
5952 if (unlikely(!sch))
5953 return false;
5954
5955 if (!scx_dsq_insert_preamble(sch, p, enq_flags))
5956 return false;
5957
5958 if (slice)
5959 p->scx.slice = slice;
5960 else
5961 p->scx.slice = p->scx.slice ?: 1;
5962
5963 scx_dsq_insert_commit(sch, p, dsq_id, enq_flags);
5964
5965 return true;
5966 }
5967
5968 /*
5969 * COMPAT: Will be removed in v6.23 along with the ___v2 suffix.
5970 */
scx_bpf_dsq_insert(struct task_struct * p,u64 dsq_id,u64 slice,u64 enq_flags)5971 __bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id,
5972 u64 slice, u64 enq_flags)
5973 {
5974 scx_bpf_dsq_insert___v2(p, dsq_id, slice, enq_flags);
5975 }
5976
scx_dsq_insert_vtime(struct scx_sched * sch,struct task_struct * p,u64 dsq_id,u64 slice,u64 vtime,u64 enq_flags)5977 static bool scx_dsq_insert_vtime(struct scx_sched *sch, struct task_struct *p,
5978 u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags)
5979 {
5980 if (!scx_dsq_insert_preamble(sch, p, enq_flags))
5981 return false;
5982
5983 if (slice)
5984 p->scx.slice = slice;
5985 else
5986 p->scx.slice = p->scx.slice ?: 1;
5987
5988 p->scx.dsq_vtime = vtime;
5989
5990 scx_dsq_insert_commit(sch, p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
5991
5992 return true;
5993 }
5994
5995 struct scx_bpf_dsq_insert_vtime_args {
5996 /* @p can't be packed together as KF_RCU is not transitive */
5997 u64 dsq_id;
5998 u64 slice;
5999 u64 vtime;
6000 u64 enq_flags;
6001 };
6002
6003 /**
6004 * __scx_bpf_dsq_insert_vtime - Arg-wrapped vtime DSQ insertion
6005 * @p: task_struct to insert
6006 * @args: struct containing the rest of the arguments
6007 * @args->dsq_id: DSQ to insert into
6008 * @args->slice: duration @p can run for in nsecs, 0 to keep the current value
6009 * @args->vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
6010 * @args->enq_flags: SCX_ENQ_*
6011 *
6012 * Wrapper kfunc that takes arguments via struct to work around BPF's 5 argument
6013 * limit. BPF programs should use scx_bpf_dsq_insert_vtime() which is provided
6014 * as an inline wrapper in common.bpf.h.
6015 *
6016 * Insert @p into the vtime priority queue of the DSQ identified by
6017 * @args->dsq_id. Tasks queued into the priority queue are ordered by
6018 * @args->vtime. All other aspects are identical to scx_bpf_dsq_insert().
6019 *
6020 * @args->vtime ordering is according to time_before64() which considers
6021 * wrapping. A numerically larger vtime may indicate an earlier position in the
6022 * ordering and vice-versa.
6023 *
6024 * A DSQ can only be used as a FIFO or priority queue at any given time and this
6025 * function must not be called on a DSQ which already has one or more FIFO tasks
6026 * queued and vice-versa. Also, the built-in DSQs (SCX_DSQ_LOCAL and
6027 * SCX_DSQ_GLOBAL) cannot be used as priority queues.
6028 *
6029 * Returns %true on successful insertion, %false on failure. On the root
6030 * scheduler, %false return triggers scheduler abort and the caller doesn't need
6031 * to check the return value.
6032 */
6033 __bpf_kfunc bool
__scx_bpf_dsq_insert_vtime(struct task_struct * p,struct scx_bpf_dsq_insert_vtime_args * args)6034 __scx_bpf_dsq_insert_vtime(struct task_struct *p,
6035 struct scx_bpf_dsq_insert_vtime_args *args)
6036 {
6037 struct scx_sched *sch;
6038
6039 guard(rcu)();
6040
6041 sch = rcu_dereference(scx_root);
6042 if (unlikely(!sch))
6043 return false;
6044
6045 return scx_dsq_insert_vtime(sch, p, args->dsq_id, args->slice,
6046 args->vtime, args->enq_flags);
6047 }
6048
6049 /*
6050 * COMPAT: Will be removed in v6.23.
6051 */
scx_bpf_dsq_insert_vtime(struct task_struct * p,u64 dsq_id,u64 slice,u64 vtime,u64 enq_flags)6052 __bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id,
6053 u64 slice, u64 vtime, u64 enq_flags)
6054 {
6055 struct scx_sched *sch;
6056
6057 guard(rcu)();
6058
6059 sch = rcu_dereference(scx_root);
6060 if (unlikely(!sch))
6061 return;
6062
6063 scx_dsq_insert_vtime(sch, p, dsq_id, slice, vtime, enq_flags);
6064 }
6065
6066 __bpf_kfunc_end_defs();
6067
6068 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
6069 BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU)
6070 BTF_ID_FLAGS(func, scx_bpf_dsq_insert___v2, KF_RCU)
6071 BTF_ID_FLAGS(func, __scx_bpf_dsq_insert_vtime, KF_RCU)
6072 BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU)
6073 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
6074
6075 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
6076 .owner = THIS_MODULE,
6077 .set = &scx_kfunc_ids_enqueue_dispatch,
6078 };
6079
scx_dsq_move(struct bpf_iter_scx_dsq_kern * kit,struct task_struct * p,u64 dsq_id,u64 enq_flags)6080 static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit,
6081 struct task_struct *p, u64 dsq_id, u64 enq_flags)
6082 {
6083 struct scx_sched *sch = scx_root;
6084 struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
6085 struct rq *this_rq, *src_rq, *locked_rq;
6086 bool dispatched = false;
6087 bool in_balance;
6088 unsigned long flags;
6089
6090 if (!scx_kf_allowed_if_unlocked() &&
6091 !scx_kf_allowed(sch, SCX_KF_DISPATCH))
6092 return false;
6093
6094 /*
6095 * If the BPF scheduler keeps calling this function repeatedly, it can
6096 * cause similar live-lock conditions as consume_dispatch_q().
6097 */
6098 if (unlikely(READ_ONCE(scx_aborting)))
6099 return false;
6100
6101 /*
6102 * Can be called from either ops.dispatch() locking this_rq() or any
6103 * context where no rq lock is held. If latter, lock @p's task_rq which
6104 * we'll likely need anyway.
6105 */
6106 src_rq = task_rq(p);
6107
6108 local_irq_save(flags);
6109 this_rq = this_rq();
6110 in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
6111
6112 if (in_balance) {
6113 if (this_rq != src_rq) {
6114 raw_spin_rq_unlock(this_rq);
6115 raw_spin_rq_lock(src_rq);
6116 }
6117 } else {
6118 raw_spin_rq_lock(src_rq);
6119 }
6120
6121 locked_rq = src_rq;
6122 raw_spin_lock(&src_dsq->lock);
6123
6124 /*
6125 * Did someone else get to it? @p could have already left $src_dsq, got
6126 * re-enqueud, or be in the process of being consumed by someone else.
6127 */
6128 if (unlikely(p->scx.dsq != src_dsq ||
6129 u32_before(kit->cursor.priv, p->scx.dsq_seq) ||
6130 p->scx.holding_cpu >= 0) ||
6131 WARN_ON_ONCE(src_rq != task_rq(p))) {
6132 raw_spin_unlock(&src_dsq->lock);
6133 goto out;
6134 }
6135
6136 /* @p is still on $src_dsq and stable, determine the destination */
6137 dst_dsq = find_dsq_for_dispatch(sch, this_rq, dsq_id, p);
6138
6139 /*
6140 * Apply vtime and slice updates before moving so that the new time is
6141 * visible before inserting into $dst_dsq. @p is still on $src_dsq but
6142 * this is safe as we're locking it.
6143 */
6144 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
6145 p->scx.dsq_vtime = kit->vtime;
6146 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
6147 p->scx.slice = kit->slice;
6148
6149 /* execute move */
6150 locked_rq = move_task_between_dsqs(sch, p, enq_flags, src_dsq, dst_dsq);
6151 dispatched = true;
6152 out:
6153 if (in_balance) {
6154 if (this_rq != locked_rq) {
6155 raw_spin_rq_unlock(locked_rq);
6156 raw_spin_rq_lock(this_rq);
6157 }
6158 } else {
6159 raw_spin_rq_unlock_irqrestore(locked_rq, flags);
6160 }
6161
6162 kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE |
6163 __SCX_DSQ_ITER_HAS_VTIME);
6164 return dispatched;
6165 }
6166
6167 __bpf_kfunc_start_defs();
6168
6169 /**
6170 * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
6171 *
6172 * Can only be called from ops.dispatch().
6173 */
scx_bpf_dispatch_nr_slots(void)6174 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void)
6175 {
6176 struct scx_sched *sch;
6177
6178 guard(rcu)();
6179
6180 sch = rcu_dereference(scx_root);
6181 if (unlikely(!sch))
6182 return 0;
6183
6184 if (!scx_kf_allowed(sch, SCX_KF_DISPATCH))
6185 return 0;
6186
6187 return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
6188 }
6189
6190 /**
6191 * scx_bpf_dispatch_cancel - Cancel the latest dispatch
6192 *
6193 * Cancel the latest dispatch. Can be called multiple times to cancel further
6194 * dispatches. Can only be called from ops.dispatch().
6195 */
scx_bpf_dispatch_cancel(void)6196 __bpf_kfunc void scx_bpf_dispatch_cancel(void)
6197 {
6198 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6199 struct scx_sched *sch;
6200
6201 guard(rcu)();
6202
6203 sch = rcu_dereference(scx_root);
6204 if (unlikely(!sch))
6205 return;
6206
6207 if (!scx_kf_allowed(sch, SCX_KF_DISPATCH))
6208 return;
6209
6210 if (dspc->cursor > 0)
6211 dspc->cursor--;
6212 else
6213 scx_error(sch, "dispatch buffer underflow");
6214 }
6215
6216 /**
6217 * scx_bpf_dsq_move_to_local - move a task from a DSQ to the current CPU's local DSQ
6218 * @dsq_id: DSQ to move task from
6219 *
6220 * Move a task from the non-local DSQ identified by @dsq_id to the current CPU's
6221 * local DSQ for execution. Can only be called from ops.dispatch().
6222 *
6223 * This function flushes the in-flight dispatches from scx_bpf_dsq_insert()
6224 * before trying to move from the specified DSQ. It may also grab rq locks and
6225 * thus can't be called under any BPF locks.
6226 *
6227 * Returns %true if a task has been moved, %false if there isn't any task to
6228 * move.
6229 */
scx_bpf_dsq_move_to_local(u64 dsq_id)6230 __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id)
6231 {
6232 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6233 struct scx_dispatch_q *dsq;
6234 struct scx_sched *sch;
6235
6236 guard(rcu)();
6237
6238 sch = rcu_dereference(scx_root);
6239 if (unlikely(!sch))
6240 return false;
6241
6242 if (!scx_kf_allowed(sch, SCX_KF_DISPATCH))
6243 return false;
6244
6245 flush_dispatch_buf(sch, dspc->rq);
6246
6247 dsq = find_user_dsq(sch, dsq_id);
6248 if (unlikely(!dsq)) {
6249 scx_error(sch, "invalid DSQ ID 0x%016llx", dsq_id);
6250 return false;
6251 }
6252
6253 if (consume_dispatch_q(sch, dspc->rq, dsq)) {
6254 /*
6255 * A successfully consumed task can be dequeued before it starts
6256 * running while the CPU is trying to migrate other dispatched
6257 * tasks. Bump nr_tasks to tell balance_one() to retry on empty
6258 * local DSQ.
6259 */
6260 dspc->nr_tasks++;
6261 return true;
6262 } else {
6263 return false;
6264 }
6265 }
6266
6267 /**
6268 * scx_bpf_dsq_move_set_slice - Override slice when moving between DSQs
6269 * @it__iter: DSQ iterator in progress
6270 * @slice: duration the moved task can run for in nsecs
6271 *
6272 * Override the slice of the next task that will be moved from @it__iter using
6273 * scx_bpf_dsq_move[_vtime](). If this function is not called, the previous
6274 * slice duration is kept.
6275 */
scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq * it__iter,u64 slice)6276 __bpf_kfunc void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter,
6277 u64 slice)
6278 {
6279 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6280
6281 kit->slice = slice;
6282 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
6283 }
6284
6285 /**
6286 * scx_bpf_dsq_move_set_vtime - Override vtime when moving between DSQs
6287 * @it__iter: DSQ iterator in progress
6288 * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
6289 *
6290 * Override the vtime of the next task that will be moved from @it__iter using
6291 * scx_bpf_dsq_move_vtime(). If this function is not called, the previous slice
6292 * vtime is kept. If scx_bpf_dsq_move() is used to dispatch the next task, the
6293 * override is ignored and cleared.
6294 */
scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq * it__iter,u64 vtime)6295 __bpf_kfunc void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter,
6296 u64 vtime)
6297 {
6298 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6299
6300 kit->vtime = vtime;
6301 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
6302 }
6303
6304 /**
6305 * scx_bpf_dsq_move - Move a task from DSQ iteration to a DSQ
6306 * @it__iter: DSQ iterator in progress
6307 * @p: task to transfer
6308 * @dsq_id: DSQ to move @p to
6309 * @enq_flags: SCX_ENQ_*
6310 *
6311 * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ
6312 * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can
6313 * be the destination.
6314 *
6315 * For the transfer to be successful, @p must still be on the DSQ and have been
6316 * queued before the DSQ iteration started. This function doesn't care whether
6317 * @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have
6318 * been queued before the iteration started.
6319 *
6320 * @p's slice is kept by default. Use scx_bpf_dsq_move_set_slice() to update.
6321 *
6322 * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
6323 * lock (e.g. BPF timers or SYSCALL programs).
6324 *
6325 * Returns %true if @p has been consumed, %false if @p had already been
6326 * consumed, dequeued, or, for sub-scheds, @dsq_id points to a disallowed local
6327 * DSQ.
6328 */
scx_bpf_dsq_move(struct bpf_iter_scx_dsq * it__iter,struct task_struct * p,u64 dsq_id,u64 enq_flags)6329 __bpf_kfunc bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter,
6330 struct task_struct *p, u64 dsq_id,
6331 u64 enq_flags)
6332 {
6333 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6334 p, dsq_id, enq_flags);
6335 }
6336
6337 /**
6338 * scx_bpf_dsq_move_vtime - Move a task from DSQ iteration to a PRIQ DSQ
6339 * @it__iter: DSQ iterator in progress
6340 * @p: task to transfer
6341 * @dsq_id: DSQ to move @p to
6342 * @enq_flags: SCX_ENQ_*
6343 *
6344 * Transfer @p which is on the DSQ currently iterated by @it__iter to the
6345 * priority queue of the DSQ specified by @dsq_id. The destination must be a
6346 * user DSQ as only user DSQs support priority queue.
6347 *
6348 * @p's slice and vtime are kept by default. Use scx_bpf_dsq_move_set_slice()
6349 * and scx_bpf_dsq_move_set_vtime() to update.
6350 *
6351 * All other aspects are identical to scx_bpf_dsq_move(). See
6352 * scx_bpf_dsq_insert_vtime() for more information on @vtime.
6353 */
scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq * it__iter,struct task_struct * p,u64 dsq_id,u64 enq_flags)6354 __bpf_kfunc bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter,
6355 struct task_struct *p, u64 dsq_id,
6356 u64 enq_flags)
6357 {
6358 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6359 p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6360 }
6361
6362 __bpf_kfunc_end_defs();
6363
6364 BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
6365 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
6366 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
6367 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local)
6368 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU)
6369 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU)
6370 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6371 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
6372 BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
6373
6374 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
6375 .owner = THIS_MODULE,
6376 .set = &scx_kfunc_ids_dispatch,
6377 };
6378
reenq_local(struct rq * rq)6379 static u32 reenq_local(struct rq *rq)
6380 {
6381 LIST_HEAD(tasks);
6382 u32 nr_enqueued = 0;
6383 struct task_struct *p, *n;
6384
6385 lockdep_assert_rq_held(rq);
6386
6387 /*
6388 * The BPF scheduler may choose to dispatch tasks back to
6389 * @rq->scx.local_dsq. Move all candidate tasks off to a private list
6390 * first to avoid processing the same tasks repeatedly.
6391 */
6392 list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
6393 scx.dsq_list.node) {
6394 /*
6395 * If @p is being migrated, @p's current CPU may not agree with
6396 * its allowed CPUs and the migration_cpu_stop is about to
6397 * deactivate and re-activate @p anyway. Skip re-enqueueing.
6398 *
6399 * While racing sched property changes may also dequeue and
6400 * re-enqueue a migrating task while its current CPU and allowed
6401 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to
6402 * the current local DSQ for running tasks and thus are not
6403 * visible to the BPF scheduler.
6404 */
6405 if (p->migration_pending)
6406 continue;
6407
6408 dispatch_dequeue(rq, p);
6409 list_add_tail(&p->scx.dsq_list.node, &tasks);
6410 }
6411
6412 list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
6413 list_del_init(&p->scx.dsq_list.node);
6414 do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
6415 nr_enqueued++;
6416 }
6417
6418 return nr_enqueued;
6419 }
6420
6421 __bpf_kfunc_start_defs();
6422
6423 /**
6424 * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
6425 *
6426 * Iterate over all of the tasks currently enqueued on the local DSQ of the
6427 * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
6428 * processed tasks. Can only be called from ops.cpu_release().
6429 *
6430 * COMPAT: Will be removed in v6.23 along with the ___v2 suffix on the void
6431 * returning variant that can be called from anywhere.
6432 */
scx_bpf_reenqueue_local(void)6433 __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
6434 {
6435 struct scx_sched *sch;
6436 struct rq *rq;
6437
6438 guard(rcu)();
6439 sch = rcu_dereference(scx_root);
6440 if (unlikely(!sch))
6441 return 0;
6442
6443 if (!scx_kf_allowed(sch, SCX_KF_CPU_RELEASE))
6444 return 0;
6445
6446 rq = cpu_rq(smp_processor_id());
6447 lockdep_assert_rq_held(rq);
6448
6449 return reenq_local(rq);
6450 }
6451
6452 __bpf_kfunc_end_defs();
6453
6454 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
6455 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local)
6456 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
6457
6458 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
6459 .owner = THIS_MODULE,
6460 .set = &scx_kfunc_ids_cpu_release,
6461 };
6462
6463 __bpf_kfunc_start_defs();
6464
6465 /**
6466 * scx_bpf_create_dsq - Create a custom DSQ
6467 * @dsq_id: DSQ to create
6468 * @node: NUMA node to allocate from
6469 *
6470 * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable
6471 * scx callback, and any BPF_PROG_TYPE_SYSCALL prog.
6472 */
scx_bpf_create_dsq(u64 dsq_id,s32 node)6473 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
6474 {
6475 struct scx_dispatch_q *dsq;
6476 struct scx_sched *sch;
6477 s32 ret;
6478
6479 if (unlikely(node >= (int)nr_node_ids ||
6480 (node < 0 && node != NUMA_NO_NODE)))
6481 return -EINVAL;
6482
6483 if (unlikely(dsq_id & SCX_DSQ_FLAG_BUILTIN))
6484 return -EINVAL;
6485
6486 dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
6487 if (!dsq)
6488 return -ENOMEM;
6489
6490 init_dsq(dsq, dsq_id);
6491
6492 rcu_read_lock();
6493
6494 sch = rcu_dereference(scx_root);
6495 if (sch)
6496 ret = rhashtable_lookup_insert_fast(&sch->dsq_hash, &dsq->hash_node,
6497 dsq_hash_params);
6498 else
6499 ret = -ENODEV;
6500
6501 rcu_read_unlock();
6502 if (ret)
6503 kfree(dsq);
6504 return ret;
6505 }
6506
6507 __bpf_kfunc_end_defs();
6508
6509 BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
6510 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
6511 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU)
6512 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU)
6513 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6514 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
6515 BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
6516
6517 static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = {
6518 .owner = THIS_MODULE,
6519 .set = &scx_kfunc_ids_unlocked,
6520 };
6521
6522 __bpf_kfunc_start_defs();
6523
6524 /**
6525 * scx_bpf_task_set_slice - Set task's time slice
6526 * @p: task of interest
6527 * @slice: time slice to set in nsecs
6528 *
6529 * Set @p's time slice to @slice. Returns %true on success, %false if the
6530 * calling scheduler doesn't have authority over @p.
6531 */
scx_bpf_task_set_slice(struct task_struct * p,u64 slice)6532 __bpf_kfunc bool scx_bpf_task_set_slice(struct task_struct *p, u64 slice)
6533 {
6534 p->scx.slice = slice;
6535 return true;
6536 }
6537
6538 /**
6539 * scx_bpf_task_set_dsq_vtime - Set task's virtual time for DSQ ordering
6540 * @p: task of interest
6541 * @vtime: virtual time to set
6542 *
6543 * Set @p's virtual time to @vtime. Returns %true on success, %false if the
6544 * calling scheduler doesn't have authority over @p.
6545 */
scx_bpf_task_set_dsq_vtime(struct task_struct * p,u64 vtime)6546 __bpf_kfunc bool scx_bpf_task_set_dsq_vtime(struct task_struct *p, u64 vtime)
6547 {
6548 p->scx.dsq_vtime = vtime;
6549 return true;
6550 }
6551
scx_kick_cpu(struct scx_sched * sch,s32 cpu,u64 flags)6552 static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags)
6553 {
6554 struct rq *this_rq;
6555 unsigned long irq_flags;
6556
6557 if (!ops_cpu_valid(sch, cpu, NULL))
6558 return;
6559
6560 local_irq_save(irq_flags);
6561
6562 this_rq = this_rq();
6563
6564 /*
6565 * While bypassing for PM ops, IRQ handling may not be online which can
6566 * lead to irq_work_queue() malfunction such as infinite busy wait for
6567 * IRQ status update. Suppress kicking.
6568 */
6569 if (scx_rq_bypassing(this_rq))
6570 goto out;
6571
6572 /*
6573 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
6574 * rq locks. We can probably be smarter and avoid bouncing if called
6575 * from ops which don't hold a rq lock.
6576 */
6577 if (flags & SCX_KICK_IDLE) {
6578 struct rq *target_rq = cpu_rq(cpu);
6579
6580 if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT)))
6581 scx_error(sch, "PREEMPT/WAIT cannot be used with SCX_KICK_IDLE");
6582
6583 if (raw_spin_rq_trylock(target_rq)) {
6584 if (can_skip_idle_kick(target_rq)) {
6585 raw_spin_rq_unlock(target_rq);
6586 goto out;
6587 }
6588 raw_spin_rq_unlock(target_rq);
6589 }
6590 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
6591 } else {
6592 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
6593
6594 if (flags & SCX_KICK_PREEMPT)
6595 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
6596 if (flags & SCX_KICK_WAIT)
6597 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
6598 }
6599
6600 irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
6601 out:
6602 local_irq_restore(irq_flags);
6603 }
6604
6605 /**
6606 * scx_bpf_kick_cpu - Trigger reschedule on a CPU
6607 * @cpu: cpu to kick
6608 * @flags: %SCX_KICK_* flags
6609 *
6610 * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
6611 * trigger rescheduling on a busy CPU. This can be called from any online
6612 * scx_ops operation and the actual kicking is performed asynchronously through
6613 * an irq work.
6614 */
scx_bpf_kick_cpu(s32 cpu,u64 flags)6615 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
6616 {
6617 struct scx_sched *sch;
6618
6619 guard(rcu)();
6620 sch = rcu_dereference(scx_root);
6621 if (likely(sch))
6622 scx_kick_cpu(sch, cpu, flags);
6623 }
6624
6625 /**
6626 * scx_bpf_dsq_nr_queued - Return the number of queued tasks
6627 * @dsq_id: id of the DSQ
6628 *
6629 * Return the number of tasks in the DSQ matching @dsq_id. If not found,
6630 * -%ENOENT is returned.
6631 */
scx_bpf_dsq_nr_queued(u64 dsq_id)6632 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id)
6633 {
6634 struct scx_sched *sch;
6635 struct scx_dispatch_q *dsq;
6636 s32 ret;
6637
6638 preempt_disable();
6639
6640 sch = rcu_dereference_sched(scx_root);
6641 if (unlikely(!sch)) {
6642 ret = -ENODEV;
6643 goto out;
6644 }
6645
6646 if (dsq_id == SCX_DSQ_LOCAL) {
6647 ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
6648 goto out;
6649 } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
6650 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
6651
6652 if (ops_cpu_valid(sch, cpu, NULL)) {
6653 ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
6654 goto out;
6655 }
6656 } else {
6657 dsq = find_user_dsq(sch, dsq_id);
6658 if (dsq) {
6659 ret = READ_ONCE(dsq->nr);
6660 goto out;
6661 }
6662 }
6663 ret = -ENOENT;
6664 out:
6665 preempt_enable();
6666 return ret;
6667 }
6668
6669 /**
6670 * scx_bpf_destroy_dsq - Destroy a custom DSQ
6671 * @dsq_id: DSQ to destroy
6672 *
6673 * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
6674 * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
6675 * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
6676 * which doesn't exist. Can be called from any online scx_ops operations.
6677 */
scx_bpf_destroy_dsq(u64 dsq_id)6678 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id)
6679 {
6680 struct scx_sched *sch;
6681
6682 rcu_read_lock();
6683 sch = rcu_dereference(scx_root);
6684 if (sch)
6685 destroy_dsq(sch, dsq_id);
6686 rcu_read_unlock();
6687 }
6688
6689 /**
6690 * bpf_iter_scx_dsq_new - Create a DSQ iterator
6691 * @it: iterator to initialize
6692 * @dsq_id: DSQ to iterate
6693 * @flags: %SCX_DSQ_ITER_*
6694 *
6695 * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
6696 * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
6697 * tasks which are already queued when this function is invoked.
6698 */
bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq * it,u64 dsq_id,u64 flags)6699 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
6700 u64 flags)
6701 {
6702 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6703 struct scx_sched *sch;
6704
6705 BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
6706 sizeof(struct bpf_iter_scx_dsq));
6707 BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
6708 __alignof__(struct bpf_iter_scx_dsq));
6709 BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
6710 ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
6711
6712 /*
6713 * next() and destroy() will be called regardless of the return value.
6714 * Always clear $kit->dsq.
6715 */
6716 kit->dsq = NULL;
6717
6718 sch = rcu_dereference_check(scx_root, rcu_read_lock_bh_held());
6719 if (unlikely(!sch))
6720 return -ENODEV;
6721
6722 if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
6723 return -EINVAL;
6724
6725 kit->dsq = find_user_dsq(sch, dsq_id);
6726 if (!kit->dsq)
6727 return -ENOENT;
6728
6729 kit->cursor = INIT_DSQ_LIST_CURSOR(kit->cursor, flags,
6730 READ_ONCE(kit->dsq->seq));
6731
6732 return 0;
6733 }
6734
6735 /**
6736 * bpf_iter_scx_dsq_next - Progress a DSQ iterator
6737 * @it: iterator to progress
6738 *
6739 * Return the next task. See bpf_iter_scx_dsq_new().
6740 */
bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq * it)6741 __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
6742 {
6743 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6744 bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV;
6745 struct task_struct *p;
6746 unsigned long flags;
6747
6748 if (!kit->dsq)
6749 return NULL;
6750
6751 raw_spin_lock_irqsave(&kit->dsq->lock, flags);
6752
6753 if (list_empty(&kit->cursor.node))
6754 p = NULL;
6755 else
6756 p = container_of(&kit->cursor, struct task_struct, scx.dsq_list);
6757
6758 /*
6759 * Only tasks which were queued before the iteration started are
6760 * visible. This bounds BPF iterations and guarantees that vtime never
6761 * jumps in the other direction while iterating.
6762 */
6763 do {
6764 p = nldsq_next_task(kit->dsq, p, rev);
6765 } while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq)));
6766
6767 if (p) {
6768 if (rev)
6769 list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node);
6770 else
6771 list_move(&kit->cursor.node, &p->scx.dsq_list.node);
6772 } else {
6773 list_del_init(&kit->cursor.node);
6774 }
6775
6776 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
6777
6778 return p;
6779 }
6780
6781 /**
6782 * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
6783 * @it: iterator to destroy
6784 *
6785 * Undo scx_iter_scx_dsq_new().
6786 */
bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq * it)6787 __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
6788 {
6789 struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6790
6791 if (!kit->dsq)
6792 return;
6793
6794 if (!list_empty(&kit->cursor.node)) {
6795 unsigned long flags;
6796
6797 raw_spin_lock_irqsave(&kit->dsq->lock, flags);
6798 list_del_init(&kit->cursor.node);
6799 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
6800 }
6801 kit->dsq = NULL;
6802 }
6803
6804 /**
6805 * scx_bpf_dsq_peek - Lockless peek at the first element.
6806 * @dsq_id: DSQ to examine.
6807 *
6808 * Read the first element in the DSQ. This is semantically equivalent to using
6809 * the DSQ iterator, but is lockfree. Of course, like any lockless operation,
6810 * this provides only a point-in-time snapshot, and the contents may change
6811 * by the time any subsequent locking operation reads the queue.
6812 *
6813 * Returns the pointer, or NULL indicates an empty queue OR internal error.
6814 */
scx_bpf_dsq_peek(u64 dsq_id)6815 __bpf_kfunc struct task_struct *scx_bpf_dsq_peek(u64 dsq_id)
6816 {
6817 struct scx_sched *sch;
6818 struct scx_dispatch_q *dsq;
6819
6820 sch = rcu_dereference(scx_root);
6821 if (unlikely(!sch))
6822 return NULL;
6823
6824 if (unlikely(dsq_id & SCX_DSQ_FLAG_BUILTIN)) {
6825 scx_error(sch, "peek disallowed on builtin DSQ 0x%llx", dsq_id);
6826 return NULL;
6827 }
6828
6829 dsq = find_user_dsq(sch, dsq_id);
6830 if (unlikely(!dsq)) {
6831 scx_error(sch, "peek on non-existent DSQ 0x%llx", dsq_id);
6832 return NULL;
6833 }
6834
6835 return rcu_dereference(dsq->first_task);
6836 }
6837
6838 __bpf_kfunc_end_defs();
6839
__bstr_format(struct scx_sched * sch,u64 * data_buf,char * line_buf,size_t line_size,char * fmt,unsigned long long * data,u32 data__sz)6840 static s32 __bstr_format(struct scx_sched *sch, u64 *data_buf, char *line_buf,
6841 size_t line_size, char *fmt, unsigned long long *data,
6842 u32 data__sz)
6843 {
6844 struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
6845 s32 ret;
6846
6847 if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
6848 (data__sz && !data)) {
6849 scx_error(sch, "invalid data=%p and data__sz=%u", (void *)data, data__sz);
6850 return -EINVAL;
6851 }
6852
6853 ret = copy_from_kernel_nofault(data_buf, data, data__sz);
6854 if (ret < 0) {
6855 scx_error(sch, "failed to read data fields (%d)", ret);
6856 return ret;
6857 }
6858
6859 ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8,
6860 &bprintf_data);
6861 if (ret < 0) {
6862 scx_error(sch, "format preparation failed (%d)", ret);
6863 return ret;
6864 }
6865
6866 ret = bstr_printf(line_buf, line_size, fmt,
6867 bprintf_data.bin_args);
6868 bpf_bprintf_cleanup(&bprintf_data);
6869 if (ret < 0) {
6870 scx_error(sch, "(\"%s\", %p, %u) failed to format", fmt, data, data__sz);
6871 return ret;
6872 }
6873
6874 return ret;
6875 }
6876
bstr_format(struct scx_sched * sch,struct scx_bstr_buf * buf,char * fmt,unsigned long long * data,u32 data__sz)6877 static s32 bstr_format(struct scx_sched *sch, struct scx_bstr_buf *buf,
6878 char *fmt, unsigned long long *data, u32 data__sz)
6879 {
6880 return __bstr_format(sch, buf->data, buf->line, sizeof(buf->line),
6881 fmt, data, data__sz);
6882 }
6883
6884 __bpf_kfunc_start_defs();
6885
6886 /**
6887 * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
6888 * @exit_code: Exit value to pass to user space via struct scx_exit_info.
6889 * @fmt: error message format string
6890 * @data: format string parameters packaged using ___bpf_fill() macro
6891 * @data__sz: @data len, must end in '__sz' for the verifier
6892 *
6893 * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops
6894 * disabling.
6895 */
scx_bpf_exit_bstr(s64 exit_code,char * fmt,unsigned long long * data,u32 data__sz)6896 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
6897 unsigned long long *data, u32 data__sz)
6898 {
6899 struct scx_sched *sch;
6900 unsigned long flags;
6901
6902 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
6903 sch = rcu_dereference_bh(scx_root);
6904 if (likely(sch) &&
6905 bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
6906 scx_exit(sch, SCX_EXIT_UNREG_BPF, exit_code, "%s", scx_exit_bstr_buf.line);
6907 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
6908 }
6909
6910 /**
6911 * scx_bpf_error_bstr - Indicate fatal error
6912 * @fmt: error message format string
6913 * @data: format string parameters packaged using ___bpf_fill() macro
6914 * @data__sz: @data len, must end in '__sz' for the verifier
6915 *
6916 * Indicate that the BPF scheduler encountered a fatal error and initiate ops
6917 * disabling.
6918 */
scx_bpf_error_bstr(char * fmt,unsigned long long * data,u32 data__sz)6919 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
6920 u32 data__sz)
6921 {
6922 struct scx_sched *sch;
6923 unsigned long flags;
6924
6925 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
6926 sch = rcu_dereference_bh(scx_root);
6927 if (likely(sch) &&
6928 bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
6929 scx_exit(sch, SCX_EXIT_ERROR_BPF, 0, "%s", scx_exit_bstr_buf.line);
6930 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
6931 }
6932
6933 /**
6934 * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler
6935 * @fmt: format string
6936 * @data: format string parameters packaged using ___bpf_fill() macro
6937 * @data__sz: @data len, must end in '__sz' for the verifier
6938 *
6939 * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and
6940 * dump_task() to generate extra debug dump specific to the BPF scheduler.
6941 *
6942 * The extra dump may be multiple lines. A single line may be split over
6943 * multiple calls. The last line is automatically terminated.
6944 */
scx_bpf_dump_bstr(char * fmt,unsigned long long * data,u32 data__sz)6945 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data,
6946 u32 data__sz)
6947 {
6948 struct scx_sched *sch;
6949 struct scx_dump_data *dd = &scx_dump_data;
6950 struct scx_bstr_buf *buf = &dd->buf;
6951 s32 ret;
6952
6953 guard(rcu)();
6954
6955 sch = rcu_dereference(scx_root);
6956 if (unlikely(!sch))
6957 return;
6958
6959 if (raw_smp_processor_id() != dd->cpu) {
6960 scx_error(sch, "scx_bpf_dump() must only be called from ops.dump() and friends");
6961 return;
6962 }
6963
6964 /* append the formatted string to the line buf */
6965 ret = __bstr_format(sch, buf->data, buf->line + dd->cursor,
6966 sizeof(buf->line) - dd->cursor, fmt, data, data__sz);
6967 if (ret < 0) {
6968 dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)",
6969 dd->prefix, fmt, data, data__sz, ret);
6970 return;
6971 }
6972
6973 dd->cursor += ret;
6974 dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line));
6975
6976 if (!dd->cursor)
6977 return;
6978
6979 /*
6980 * If the line buf overflowed or ends in a newline, flush it into the
6981 * dump. This is to allow the caller to generate a single line over
6982 * multiple calls. As ops_dump_flush() can also handle multiple lines in
6983 * the line buf, the only case which can lead to an unexpected
6984 * truncation is when the caller keeps generating newlines in the middle
6985 * instead of the end consecutively. Don't do that.
6986 */
6987 if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n')
6988 ops_dump_flush();
6989 }
6990
6991 /**
6992 * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
6993 *
6994 * Iterate over all of the tasks currently enqueued on the local DSQ of the
6995 * caller's CPU, and re-enqueue them in the BPF scheduler. Can be called from
6996 * anywhere.
6997 */
scx_bpf_reenqueue_local___v2(void)6998 __bpf_kfunc void scx_bpf_reenqueue_local___v2(void)
6999 {
7000 struct rq *rq;
7001
7002 guard(preempt)();
7003
7004 rq = this_rq();
7005 local_set(&rq->scx.reenq_local_deferred, 1);
7006 schedule_deferred(rq);
7007 }
7008
7009 /**
7010 * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU
7011 * @cpu: CPU of interest
7012 *
7013 * Return the maximum relative capacity of @cpu in relation to the most
7014 * performant CPU in the system. The return value is in the range [1,
7015 * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur().
7016 */
scx_bpf_cpuperf_cap(s32 cpu)7017 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu)
7018 {
7019 struct scx_sched *sch;
7020
7021 guard(rcu)();
7022
7023 sch = rcu_dereference(scx_root);
7024 if (likely(sch) && ops_cpu_valid(sch, cpu, NULL))
7025 return arch_scale_cpu_capacity(cpu);
7026 else
7027 return SCX_CPUPERF_ONE;
7028 }
7029
7030 /**
7031 * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU
7032 * @cpu: CPU of interest
7033 *
7034 * Return the current relative performance of @cpu in relation to its maximum.
7035 * The return value is in the range [1, %SCX_CPUPERF_ONE].
7036 *
7037 * The current performance level of a CPU in relation to the maximum performance
7038 * available in the system can be calculated as follows:
7039 *
7040 * scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE
7041 *
7042 * The result is in the range [1, %SCX_CPUPERF_ONE].
7043 */
scx_bpf_cpuperf_cur(s32 cpu)7044 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu)
7045 {
7046 struct scx_sched *sch;
7047
7048 guard(rcu)();
7049
7050 sch = rcu_dereference(scx_root);
7051 if (likely(sch) && ops_cpu_valid(sch, cpu, NULL))
7052 return arch_scale_freq_capacity(cpu);
7053 else
7054 return SCX_CPUPERF_ONE;
7055 }
7056
7057 /**
7058 * scx_bpf_cpuperf_set - Set the relative performance target of a CPU
7059 * @cpu: CPU of interest
7060 * @perf: target performance level [0, %SCX_CPUPERF_ONE]
7061 *
7062 * Set the target performance level of @cpu to @perf. @perf is in linear
7063 * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
7064 * schedutil cpufreq governor chooses the target frequency.
7065 *
7066 * The actual performance level chosen, CPU grouping, and the overhead and
7067 * latency of the operations are dependent on the hardware and cpufreq driver in
7068 * use. Consult hardware and cpufreq documentation for more information. The
7069 * current performance level can be monitored using scx_bpf_cpuperf_cur().
7070 */
scx_bpf_cpuperf_set(s32 cpu,u32 perf)7071 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
7072 {
7073 struct scx_sched *sch;
7074
7075 guard(rcu)();
7076
7077 sch = rcu_dereference(scx_root);
7078 if (unlikely(!sch))
7079 return;
7080
7081 if (unlikely(perf > SCX_CPUPERF_ONE)) {
7082 scx_error(sch, "Invalid cpuperf target %u for CPU %d", perf, cpu);
7083 return;
7084 }
7085
7086 if (ops_cpu_valid(sch, cpu, NULL)) {
7087 struct rq *rq = cpu_rq(cpu), *locked_rq = scx_locked_rq();
7088 struct rq_flags rf;
7089
7090 /*
7091 * When called with an rq lock held, restrict the operation
7092 * to the corresponding CPU to prevent ABBA deadlocks.
7093 */
7094 if (locked_rq && rq != locked_rq) {
7095 scx_error(sch, "Invalid target CPU %d", cpu);
7096 return;
7097 }
7098
7099 /*
7100 * If no rq lock is held, allow to operate on any CPU by
7101 * acquiring the corresponding rq lock.
7102 */
7103 if (!locked_rq) {
7104 rq_lock_irqsave(rq, &rf);
7105 update_rq_clock(rq);
7106 }
7107
7108 rq->scx.cpuperf_target = perf;
7109 cpufreq_update_util(rq, 0);
7110
7111 if (!locked_rq)
7112 rq_unlock_irqrestore(rq, &rf);
7113 }
7114 }
7115
7116 /**
7117 * scx_bpf_nr_node_ids - Return the number of possible node IDs
7118 *
7119 * All valid node IDs in the system are smaller than the returned value.
7120 */
scx_bpf_nr_node_ids(void)7121 __bpf_kfunc u32 scx_bpf_nr_node_ids(void)
7122 {
7123 return nr_node_ids;
7124 }
7125
7126 /**
7127 * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
7128 *
7129 * All valid CPU IDs in the system are smaller than the returned value.
7130 */
scx_bpf_nr_cpu_ids(void)7131 __bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
7132 {
7133 return nr_cpu_ids;
7134 }
7135
7136 /**
7137 * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
7138 */
scx_bpf_get_possible_cpumask(void)7139 __bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
7140 {
7141 return cpu_possible_mask;
7142 }
7143
7144 /**
7145 * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
7146 */
scx_bpf_get_online_cpumask(void)7147 __bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
7148 {
7149 return cpu_online_mask;
7150 }
7151
7152 /**
7153 * scx_bpf_put_cpumask - Release a possible/online cpumask
7154 * @cpumask: cpumask to release
7155 */
scx_bpf_put_cpumask(const struct cpumask * cpumask)7156 __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
7157 {
7158 /*
7159 * Empty function body because we aren't actually acquiring or releasing
7160 * a reference to a global cpumask, which is read-only in the caller and
7161 * is never released. The acquire / release semantics here are just used
7162 * to make the cpumask is a trusted pointer in the caller.
7163 */
7164 }
7165
7166 /**
7167 * scx_bpf_task_running - Is task currently running?
7168 * @p: task of interest
7169 */
scx_bpf_task_running(const struct task_struct * p)7170 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
7171 {
7172 return task_rq(p)->curr == p;
7173 }
7174
7175 /**
7176 * scx_bpf_task_cpu - CPU a task is currently associated with
7177 * @p: task of interest
7178 */
scx_bpf_task_cpu(const struct task_struct * p)7179 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
7180 {
7181 return task_cpu(p);
7182 }
7183
7184 /**
7185 * scx_bpf_cpu_rq - Fetch the rq of a CPU
7186 * @cpu: CPU of the rq
7187 */
scx_bpf_cpu_rq(s32 cpu)7188 __bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
7189 {
7190 struct scx_sched *sch;
7191
7192 guard(rcu)();
7193
7194 sch = rcu_dereference(scx_root);
7195 if (unlikely(!sch))
7196 return NULL;
7197
7198 if (!ops_cpu_valid(sch, cpu, NULL))
7199 return NULL;
7200
7201 if (!sch->warned_deprecated_rq) {
7202 printk_deferred(KERN_WARNING "sched_ext: %s() is deprecated; "
7203 "use scx_bpf_locked_rq() when holding rq lock "
7204 "or scx_bpf_cpu_curr() to read remote curr safely.\n", __func__);
7205 sch->warned_deprecated_rq = true;
7206 }
7207
7208 return cpu_rq(cpu);
7209 }
7210
7211 /**
7212 * scx_bpf_locked_rq - Return the rq currently locked by SCX
7213 *
7214 * Returns the rq if a rq lock is currently held by SCX.
7215 * Otherwise emits an error and returns NULL.
7216 */
scx_bpf_locked_rq(void)7217 __bpf_kfunc struct rq *scx_bpf_locked_rq(void)
7218 {
7219 struct scx_sched *sch;
7220 struct rq *rq;
7221
7222 guard(preempt)();
7223
7224 sch = rcu_dereference_sched(scx_root);
7225 if (unlikely(!sch))
7226 return NULL;
7227
7228 rq = scx_locked_rq();
7229 if (!rq) {
7230 scx_error(sch, "accessing rq without holding rq lock");
7231 return NULL;
7232 }
7233
7234 return rq;
7235 }
7236
7237 /**
7238 * scx_bpf_cpu_curr - Return remote CPU's curr task
7239 * @cpu: CPU of interest
7240 *
7241 * Callers must hold RCU read lock (KF_RCU).
7242 */
scx_bpf_cpu_curr(s32 cpu)7243 __bpf_kfunc struct task_struct *scx_bpf_cpu_curr(s32 cpu)
7244 {
7245 struct scx_sched *sch;
7246
7247 guard(rcu)();
7248
7249 sch = rcu_dereference(scx_root);
7250 if (unlikely(!sch))
7251 return NULL;
7252
7253 if (!ops_cpu_valid(sch, cpu, NULL))
7254 return NULL;
7255
7256 return rcu_dereference(cpu_rq(cpu)->curr);
7257 }
7258
7259 /**
7260 * scx_bpf_task_cgroup - Return the sched cgroup of a task
7261 * @p: task of interest
7262 *
7263 * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
7264 * from the scheduler's POV. SCX operations should use this function to
7265 * determine @p's current cgroup as, unlike following @p->cgroups,
7266 * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all
7267 * rq-locked operations. Can be called on the parameter tasks of rq-locked
7268 * operations. The restriction guarantees that @p's rq is locked by the caller.
7269 */
7270 #ifdef CONFIG_CGROUP_SCHED
scx_bpf_task_cgroup(struct task_struct * p)7271 __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
7272 {
7273 struct task_group *tg = p->sched_task_group;
7274 struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
7275 struct scx_sched *sch;
7276
7277 guard(rcu)();
7278
7279 sch = rcu_dereference(scx_root);
7280 if (unlikely(!sch))
7281 goto out;
7282
7283 if (!scx_kf_allowed_on_arg_tasks(sch, __SCX_KF_RQ_LOCKED, p))
7284 goto out;
7285
7286 cgrp = tg_cgrp(tg);
7287
7288 out:
7289 cgroup_get(cgrp);
7290 return cgrp;
7291 }
7292 #endif
7293
7294 /**
7295 * scx_bpf_now - Returns a high-performance monotonically non-decreasing
7296 * clock for the current CPU. The clock returned is in nanoseconds.
7297 *
7298 * It provides the following properties:
7299 *
7300 * 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently
7301 * to account for execution time and track tasks' runtime properties.
7302 * Unfortunately, in some hardware platforms, bpf_ktime_get_ns() -- which
7303 * eventually reads a hardware timestamp counter -- is neither performant nor
7304 * scalable. scx_bpf_now() aims to provide a high-performance clock by
7305 * using the rq clock in the scheduler core whenever possible.
7306 *
7307 * 2) High enough resolution for the BPF scheduler use cases: In most BPF
7308 * scheduler use cases, the required clock resolution is lower than the most
7309 * accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically
7310 * uses the rq clock in the scheduler core whenever it is valid. It considers
7311 * that the rq clock is valid from the time the rq clock is updated
7312 * (update_rq_clock) until the rq is unlocked (rq_unpin_lock).
7313 *
7314 * 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now()
7315 * guarantees the clock never goes backward when comparing them in the same
7316 * CPU. On the other hand, when comparing clocks in different CPUs, there
7317 * is no such guarantee -- the clock can go backward. It provides a
7318 * monotonically *non-decreasing* clock so that it would provide the same
7319 * clock values in two different scx_bpf_now() calls in the same CPU
7320 * during the same period of when the rq clock is valid.
7321 */
scx_bpf_now(void)7322 __bpf_kfunc u64 scx_bpf_now(void)
7323 {
7324 struct rq *rq;
7325 u64 clock;
7326
7327 preempt_disable();
7328
7329 rq = this_rq();
7330 if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) {
7331 /*
7332 * If the rq clock is valid, use the cached rq clock.
7333 *
7334 * Note that scx_bpf_now() is re-entrant between a process
7335 * context and an interrupt context (e.g., timer interrupt).
7336 * However, we don't need to consider the race between them
7337 * because such race is not observable from a caller.
7338 */
7339 clock = READ_ONCE(rq->scx.clock);
7340 } else {
7341 /*
7342 * Otherwise, return a fresh rq clock.
7343 *
7344 * The rq clock is updated outside of the rq lock.
7345 * In this case, keep the updated rq clock invalid so the next
7346 * kfunc call outside the rq lock gets a fresh rq clock.
7347 */
7348 clock = sched_clock_cpu(cpu_of(rq));
7349 }
7350
7351 preempt_enable();
7352
7353 return clock;
7354 }
7355
scx_read_events(struct scx_sched * sch,struct scx_event_stats * events)7356 static void scx_read_events(struct scx_sched *sch, struct scx_event_stats *events)
7357 {
7358 struct scx_event_stats *e_cpu;
7359 int cpu;
7360
7361 /* Aggregate per-CPU event counters into @events. */
7362 memset(events, 0, sizeof(*events));
7363 for_each_possible_cpu(cpu) {
7364 e_cpu = &per_cpu_ptr(sch->pcpu, cpu)->event_stats;
7365 scx_agg_event(events, e_cpu, SCX_EV_SELECT_CPU_FALLBACK);
7366 scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
7367 scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_KEEP_LAST);
7368 scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_EXITING);
7369 scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
7370 scx_agg_event(events, e_cpu, SCX_EV_REFILL_SLICE_DFL);
7371 scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DURATION);
7372 scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DISPATCH);
7373 scx_agg_event(events, e_cpu, SCX_EV_BYPASS_ACTIVATE);
7374 }
7375 }
7376
7377 /*
7378 * scx_bpf_events - Get a system-wide event counter to
7379 * @events: output buffer from a BPF program
7380 * @events__sz: @events len, must end in '__sz'' for the verifier
7381 */
scx_bpf_events(struct scx_event_stats * events,size_t events__sz)7382 __bpf_kfunc void scx_bpf_events(struct scx_event_stats *events,
7383 size_t events__sz)
7384 {
7385 struct scx_sched *sch;
7386 struct scx_event_stats e_sys;
7387
7388 rcu_read_lock();
7389 sch = rcu_dereference(scx_root);
7390 if (sch)
7391 scx_read_events(sch, &e_sys);
7392 else
7393 memset(&e_sys, 0, sizeof(e_sys));
7394 rcu_read_unlock();
7395
7396 /*
7397 * We cannot entirely trust a BPF-provided size since a BPF program
7398 * might be compiled against a different vmlinux.h, of which
7399 * scx_event_stats would be larger (a newer vmlinux.h) or smaller
7400 * (an older vmlinux.h). Hence, we use the smaller size to avoid
7401 * memory corruption.
7402 */
7403 events__sz = min(events__sz, sizeof(*events));
7404 memcpy(events, &e_sys, events__sz);
7405 }
7406
7407 __bpf_kfunc_end_defs();
7408
7409 BTF_KFUNCS_START(scx_kfunc_ids_any)
7410 BTF_ID_FLAGS(func, scx_bpf_task_set_slice, KF_RCU);
7411 BTF_ID_FLAGS(func, scx_bpf_task_set_dsq_vtime, KF_RCU);
7412 BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
7413 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
7414 BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
7415 BTF_ID_FLAGS(func, scx_bpf_dsq_peek, KF_RCU_PROTECTED | KF_RET_NULL)
7416 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
7417 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
7418 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
7419 BTF_ID_FLAGS(func, scx_bpf_exit_bstr)
7420 BTF_ID_FLAGS(func, scx_bpf_error_bstr)
7421 BTF_ID_FLAGS(func, scx_bpf_dump_bstr)
7422 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local___v2)
7423 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
7424 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
7425 BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
7426 BTF_ID_FLAGS(func, scx_bpf_nr_node_ids)
7427 BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
7428 BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
7429 BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
7430 BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
7431 BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
7432 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
7433 BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
7434 BTF_ID_FLAGS(func, scx_bpf_locked_rq, KF_RET_NULL)
7435 BTF_ID_FLAGS(func, scx_bpf_cpu_curr, KF_RET_NULL | KF_RCU_PROTECTED)
7436 #ifdef CONFIG_CGROUP_SCHED
7437 BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
7438 #endif
7439 BTF_ID_FLAGS(func, scx_bpf_now)
7440 BTF_ID_FLAGS(func, scx_bpf_events)
7441 BTF_KFUNCS_END(scx_kfunc_ids_any)
7442
7443 static const struct btf_kfunc_id_set scx_kfunc_set_any = {
7444 .owner = THIS_MODULE,
7445 .set = &scx_kfunc_ids_any,
7446 };
7447
scx_init(void)7448 static int __init scx_init(void)
7449 {
7450 int ret;
7451
7452 /*
7453 * kfunc registration can't be done from init_sched_ext_class() as
7454 * register_btf_kfunc_id_set() needs most of the system to be up.
7455 *
7456 * Some kfuncs are context-sensitive and can only be called from
7457 * specific SCX ops. They are grouped into BTF sets accordingly.
7458 * Unfortunately, BPF currently doesn't have a way of enforcing such
7459 * restrictions. Eventually, the verifier should be able to enforce
7460 * them. For now, register them the same and make each kfunc explicitly
7461 * check using scx_kf_allowed().
7462 */
7463 if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7464 &scx_kfunc_set_enqueue_dispatch)) ||
7465 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7466 &scx_kfunc_set_dispatch)) ||
7467 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7468 &scx_kfunc_set_cpu_release)) ||
7469 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7470 &scx_kfunc_set_unlocked)) ||
7471 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7472 &scx_kfunc_set_unlocked)) ||
7473 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7474 &scx_kfunc_set_any)) ||
7475 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
7476 &scx_kfunc_set_any)) ||
7477 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7478 &scx_kfunc_set_any))) {
7479 pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
7480 return ret;
7481 }
7482
7483 ret = scx_idle_init();
7484 if (ret) {
7485 pr_err("sched_ext: Failed to initialize idle tracking (%d)\n", ret);
7486 return ret;
7487 }
7488
7489 ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
7490 if (ret) {
7491 pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
7492 return ret;
7493 }
7494
7495 ret = register_pm_notifier(&scx_pm_notifier);
7496 if (ret) {
7497 pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
7498 return ret;
7499 }
7500
7501 scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
7502 if (!scx_kset) {
7503 pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
7504 return -ENOMEM;
7505 }
7506
7507 ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
7508 if (ret < 0) {
7509 pr_err("sched_ext: Failed to add global attributes\n");
7510 return ret;
7511 }
7512
7513 if (!alloc_cpumask_var(&scx_bypass_lb_donee_cpumask, GFP_KERNEL) ||
7514 !alloc_cpumask_var(&scx_bypass_lb_resched_cpumask, GFP_KERNEL)) {
7515 pr_err("sched_ext: Failed to allocate cpumasks\n");
7516 return -ENOMEM;
7517 }
7518
7519 return 0;
7520 }
7521 __initcall(scx_init);
7522