/* SPDX-License-Identifier: GPL-2.0 */
/*
 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#include <linux/btf_ids.h>
#include "ext_idle.h"

/*
 * NOTE: sched_ext is in the process of growing multiple scheduler support and
 * scx_root usage is in a transitional state. Naked dereferences are safe if the
 * caller is one of the tasks attached to SCX and explicit RCU dereference is
 * necessary otherwise. Naked scx_root dereferences trigger sparse warnings but
 * are used as temporary markers to indicate that the dereferences need to be
 * updated to point to the associated scheduler instances rather than scx_root.
 */
static struct scx_sched __rcu *scx_root;

/*
 * During exit, a task may schedule after losing its PIDs. When disabling the
 * BPF scheduler, we need to be able to iterate tasks in every state to
 * guarantee system safety. Maintain a dedicated task list which contains every
 * task between its fork and eventual free.
 */
static DEFINE_RAW_SPINLOCK(scx_tasks_lock);
static LIST_HEAD(scx_tasks);

/* ops enable/disable */
static DEFINE_MUTEX(scx_enable_mutex);
DEFINE_STATIC_KEY_FALSE(__scx_enabled);
DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
static atomic_t scx_enable_state_var = ATOMIC_INIT(SCX_DISABLED);
static int scx_bypass_depth;
static cpumask_var_t scx_bypass_lb_donee_cpumask;
static cpumask_var_t scx_bypass_lb_resched_cpumask;
static bool scx_aborting;
static bool scx_init_task_enabled;
static bool scx_switching_all;
DEFINE_STATIC_KEY_FALSE(__scx_switched_all);

/*
 * Tracks whether scx_enable() called scx_bypass(true). Used to balance bypass
 * depth on enable failure. Will be removed when bypass depth is moved into the
 * sched instance.
 */
static bool scx_bypassed_for_enable;

static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);

/*
 * A monotonically increasing sequence number that is incremented every time a
 * scheduler is enabled. This can be used to check whether any custom sched_ext
 * scheduler has ever been used in the system.
 */
static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);

/*
 * The maximum amount of time in jiffies that a task may be runnable without
 * being scheduled on a CPU. If this timeout is exceeded, it will trigger
 * scx_error().
 */
static unsigned long scx_watchdog_timeout;

/*
 * The last time the delayed work was run. This delayed work relies on
 * ksoftirqd being able to run to service timer interrupts, so it's possible
 * that this work itself could get wedged. To account for this, we check that
 * it's not stalled in the timer tick, and trigger an error if it is.
 */
static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;

static struct delayed_work scx_watchdog_work;

/*
 * For %SCX_KICK_WAIT: Each CPU has a pointer to an array of kick_sync sequence
 * numbers. The arrays are allocated with kvzalloc() as size can exceed percpu
 * allocator limits on large machines. O(nr_cpu_ids^2) allocation, allocated
 * lazily when enabling and freed when disabling to avoid waste when sched_ext
 * isn't active.
84 */ 85 struct scx_kick_syncs { 86 struct rcu_head rcu; 87 unsigned long syncs[]; 88 }; 89 90 static DEFINE_PER_CPU(struct scx_kick_syncs __rcu *, scx_kick_syncs); 91 92 /* 93 * Direct dispatch marker. 94 * 95 * Non-NULL values are used for direct dispatch from enqueue path. A valid 96 * pointer points to the task currently being enqueued. An ERR_PTR value is used 97 * to indicate that direct dispatch has already happened. 98 */ 99 static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task); 100 101 static const struct rhashtable_params dsq_hash_params = { 102 .key_len = sizeof_field(struct scx_dispatch_q, id), 103 .key_offset = offsetof(struct scx_dispatch_q, id), 104 .head_offset = offsetof(struct scx_dispatch_q, hash_node), 105 }; 106 107 static LLIST_HEAD(dsqs_to_free); 108 109 /* dispatch buf */ 110 struct scx_dsp_buf_ent { 111 struct task_struct *task; 112 unsigned long qseq; 113 u64 dsq_id; 114 u64 enq_flags; 115 }; 116 117 static u32 scx_dsp_max_batch; 118 119 struct scx_dsp_ctx { 120 struct rq *rq; 121 u32 cursor; 122 u32 nr_tasks; 123 struct scx_dsp_buf_ent buf[]; 124 }; 125 126 static struct scx_dsp_ctx __percpu *scx_dsp_ctx; 127 128 /* string formatting from BPF */ 129 struct scx_bstr_buf { 130 u64 data[MAX_BPRINTF_VARARGS]; 131 char line[SCX_EXIT_MSG_LEN]; 132 }; 133 134 static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock); 135 static struct scx_bstr_buf scx_exit_bstr_buf; 136 137 /* ops debug dump */ 138 struct scx_dump_data { 139 s32 cpu; 140 bool first; 141 s32 cursor; 142 struct seq_buf *s; 143 const char *prefix; 144 struct scx_bstr_buf buf; 145 }; 146 147 static struct scx_dump_data scx_dump_data = { 148 .cpu = -1, 149 }; 150 151 /* /sys/kernel/sched_ext interface */ 152 static struct kset *scx_kset; 153 154 /* 155 * Parameters that can be adjusted through /sys/module/sched_ext/parameters. 156 * There usually is no reason to modify these as normal scheduler operation 157 * shouldn't be affected by them. The knobs are primarily for debugging. 158 */ 159 static u64 scx_slice_dfl = SCX_SLICE_DFL; 160 static unsigned int scx_slice_bypass_us = SCX_SLICE_BYPASS / NSEC_PER_USEC; 161 static unsigned int scx_bypass_lb_intv_us = SCX_BYPASS_LB_DFL_INTV_US; 162 163 static int set_slice_us(const char *val, const struct kernel_param *kp) 164 { 165 return param_set_uint_minmax(val, kp, 100, 100 * USEC_PER_MSEC); 166 } 167 168 static const struct kernel_param_ops slice_us_param_ops = { 169 .set = set_slice_us, 170 .get = param_get_uint, 171 }; 172 173 static int set_bypass_lb_intv_us(const char *val, const struct kernel_param *kp) 174 { 175 return param_set_uint_minmax(val, kp, 0, 10 * USEC_PER_SEC); 176 } 177 178 static const struct kernel_param_ops bypass_lb_intv_us_param_ops = { 179 .set = set_bypass_lb_intv_us, 180 .get = param_get_uint, 181 }; 182 183 #undef MODULE_PARAM_PREFIX 184 #define MODULE_PARAM_PREFIX "sched_ext." 
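
/*
 * Illustrative note, not part of the code flow: with the "sched_ext." prefix
 * above, the parameters declared below appear under
 * /sys/module/sched_ext/parameters/ and can be adjusted at runtime by root,
 * e.g. (the value is an example only, within the 100us..100ms range enforced
 * by set_slice_us()):
 *
 *	echo 2000 > /sys/module/sched_ext/parameters/slice_bypass_us
 */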
185 186 module_param_cb(slice_bypass_us, &slice_us_param_ops, &scx_slice_bypass_us, 0600); 187 MODULE_PARM_DESC(slice_bypass_us, "bypass slice in microseconds, applied on [un]load (100us to 100ms)"); 188 module_param_cb(bypass_lb_intv_us, &bypass_lb_intv_us_param_ops, &scx_bypass_lb_intv_us, 0600); 189 MODULE_PARM_DESC(bypass_lb_intv_us, "bypass load balance interval in microseconds (0 (disable) to 10s)"); 190 191 #undef MODULE_PARAM_PREFIX 192 193 #define CREATE_TRACE_POINTS 194 #include <trace/events/sched_ext.h> 195 196 static void process_ddsp_deferred_locals(struct rq *rq); 197 static bool task_dead_and_done(struct task_struct *p); 198 static u32 reenq_local(struct rq *rq); 199 static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags); 200 static bool scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind, 201 s64 exit_code, const char *fmt, va_list args); 202 203 static __printf(4, 5) bool scx_exit(struct scx_sched *sch, 204 enum scx_exit_kind kind, s64 exit_code, 205 const char *fmt, ...) 206 { 207 va_list args; 208 bool ret; 209 210 va_start(args, fmt); 211 ret = scx_vexit(sch, kind, exit_code, fmt, args); 212 va_end(args); 213 214 return ret; 215 } 216 217 #define scx_error(sch, fmt, args...) scx_exit((sch), SCX_EXIT_ERROR, 0, fmt, ##args) 218 #define scx_verror(sch, fmt, args) scx_vexit((sch), SCX_EXIT_ERROR, 0, fmt, args) 219 220 #define SCX_HAS_OP(sch, op) test_bit(SCX_OP_IDX(op), (sch)->has_op) 221 222 static long jiffies_delta_msecs(unsigned long at, unsigned long now) 223 { 224 if (time_after(at, now)) 225 return jiffies_to_msecs(at - now); 226 else 227 return -(long)jiffies_to_msecs(now - at); 228 } 229 230 /* if the highest set bit is N, return a mask with bits [N+1, 31] set */ 231 static u32 higher_bits(u32 flags) 232 { 233 return ~((1 << fls(flags)) - 1); 234 } 235 236 /* return the mask with only the highest bit set */ 237 static u32 highest_bit(u32 flags) 238 { 239 int bit = fls(flags); 240 return ((u64)1 << bit) >> 1; 241 } 242 243 static bool u32_before(u32 a, u32 b) 244 { 245 return (s32)(a - b) < 0; 246 } 247 248 static struct scx_dispatch_q *find_global_dsq(struct scx_sched *sch, 249 struct task_struct *p) 250 { 251 return sch->global_dsqs[cpu_to_node(task_cpu(p))]; 252 } 253 254 static struct scx_dispatch_q *find_user_dsq(struct scx_sched *sch, u64 dsq_id) 255 { 256 return rhashtable_lookup(&sch->dsq_hash, &dsq_id, dsq_hash_params); 257 } 258 259 static const struct sched_class *scx_setscheduler_class(struct task_struct *p) 260 { 261 if (p->sched_class == &stop_sched_class) 262 return &stop_sched_class; 263 264 return __setscheduler_class(p->policy, p->prio); 265 } 266 267 /* 268 * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX 269 * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate 270 * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check 271 * whether it's running from an allowed context. 272 * 273 * @mask is constant, always inline to cull the mask calculations. 274 */ 275 static __always_inline void scx_kf_allow(u32 mask) 276 { 277 /* nesting is allowed only in increasing scx_kf_mask order */ 278 WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask, 279 "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n", 280 current->scx.kf_mask, mask); 281 current->scx.kf_mask |= mask; 282 barrier(); 283 } 284 285 static void scx_kf_disallow(u32 mask) 286 { 287 barrier(); 288 current->scx.kf_mask &= ~mask; 289 } 290 291 /* 292 * Track the rq currently locked. 
293 * 294 * This allows kfuncs to safely operate on rq from any scx ops callback, 295 * knowing which rq is already locked. 296 */ 297 DEFINE_PER_CPU(struct rq *, scx_locked_rq_state); 298 299 static inline void update_locked_rq(struct rq *rq) 300 { 301 /* 302 * Check whether @rq is actually locked. This can help expose bugs 303 * or incorrect assumptions about the context in which a kfunc or 304 * callback is executed. 305 */ 306 if (rq) 307 lockdep_assert_rq_held(rq); 308 __this_cpu_write(scx_locked_rq_state, rq); 309 } 310 311 #define SCX_CALL_OP(sch, mask, op, rq, args...) \ 312 do { \ 313 if (rq) \ 314 update_locked_rq(rq); \ 315 if (mask) { \ 316 scx_kf_allow(mask); \ 317 (sch)->ops.op(args); \ 318 scx_kf_disallow(mask); \ 319 } else { \ 320 (sch)->ops.op(args); \ 321 } \ 322 if (rq) \ 323 update_locked_rq(NULL); \ 324 } while (0) 325 326 #define SCX_CALL_OP_RET(sch, mask, op, rq, args...) \ 327 ({ \ 328 __typeof__((sch)->ops.op(args)) __ret; \ 329 \ 330 if (rq) \ 331 update_locked_rq(rq); \ 332 if (mask) { \ 333 scx_kf_allow(mask); \ 334 __ret = (sch)->ops.op(args); \ 335 scx_kf_disallow(mask); \ 336 } else { \ 337 __ret = (sch)->ops.op(args); \ 338 } \ 339 if (rq) \ 340 update_locked_rq(NULL); \ 341 __ret; \ 342 }) 343 344 /* 345 * Some kfuncs are allowed only on the tasks that are subjects of the 346 * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such 347 * restrictions, the following SCX_CALL_OP_*() variants should be used when 348 * invoking scx_ops operations that take task arguments. These can only be used 349 * for non-nesting operations due to the way the tasks are tracked. 350 * 351 * kfuncs which can only operate on such tasks can in turn use 352 * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on 353 * the specific task. 354 */ 355 #define SCX_CALL_OP_TASK(sch, mask, op, rq, task, args...) \ 356 do { \ 357 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \ 358 current->scx.kf_tasks[0] = task; \ 359 SCX_CALL_OP((sch), mask, op, rq, task, ##args); \ 360 current->scx.kf_tasks[0] = NULL; \ 361 } while (0) 362 363 #define SCX_CALL_OP_TASK_RET(sch, mask, op, rq, task, args...) \ 364 ({ \ 365 __typeof__((sch)->ops.op(task, ##args)) __ret; \ 366 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \ 367 current->scx.kf_tasks[0] = task; \ 368 __ret = SCX_CALL_OP_RET((sch), mask, op, rq, task, ##args); \ 369 current->scx.kf_tasks[0] = NULL; \ 370 __ret; \ 371 }) 372 373 #define SCX_CALL_OP_2TASKS_RET(sch, mask, op, rq, task0, task1, args...) \ 374 ({ \ 375 __typeof__((sch)->ops.op(task0, task1, ##args)) __ret; \ 376 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \ 377 current->scx.kf_tasks[0] = task0; \ 378 current->scx.kf_tasks[1] = task1; \ 379 __ret = SCX_CALL_OP_RET((sch), mask, op, rq, task0, task1, ##args); \ 380 current->scx.kf_tasks[0] = NULL; \ 381 current->scx.kf_tasks[1] = NULL; \ 382 __ret; \ 383 }) 384 385 /* @mask is constant, always inline to cull unnecessary branches */ 386 static __always_inline bool scx_kf_allowed(struct scx_sched *sch, u32 mask) 387 { 388 if (unlikely(!(current->scx.kf_mask & mask))) { 389 scx_error(sch, "kfunc with mask 0x%x called from an operation only allowing 0x%x", 390 mask, current->scx.kf_mask); 391 return false; 392 } 393 394 /* 395 * Enforce nesting boundaries. e.g. A kfunc which can be called from 396 * DISPATCH must not be called if we're running DEQUEUE which is nested 397 * inside ops.dispatch(). 
We don't need to check boundaries for any 398 * blocking kfuncs as the verifier ensures they're only called from 399 * sleepable progs. 400 */ 401 if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE && 402 (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) { 403 scx_error(sch, "cpu_release kfunc called from a nested operation"); 404 return false; 405 } 406 407 if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH && 408 (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) { 409 scx_error(sch, "dispatch kfunc called from a nested operation"); 410 return false; 411 } 412 413 return true; 414 } 415 416 /* see SCX_CALL_OP_TASK() */ 417 static __always_inline bool scx_kf_allowed_on_arg_tasks(struct scx_sched *sch, 418 u32 mask, 419 struct task_struct *p) 420 { 421 if (!scx_kf_allowed(sch, mask)) 422 return false; 423 424 if (unlikely((p != current->scx.kf_tasks[0] && 425 p != current->scx.kf_tasks[1]))) { 426 scx_error(sch, "called on a task not being operated on"); 427 return false; 428 } 429 430 return true; 431 } 432 433 /** 434 * nldsq_next_task - Iterate to the next task in a non-local DSQ 435 * @dsq: user dsq being iterated 436 * @cur: current position, %NULL to start iteration 437 * @rev: walk backwards 438 * 439 * Returns %NULL when iteration is finished. 440 */ 441 static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq, 442 struct task_struct *cur, bool rev) 443 { 444 struct list_head *list_node; 445 struct scx_dsq_list_node *dsq_lnode; 446 447 lockdep_assert_held(&dsq->lock); 448 449 if (cur) 450 list_node = &cur->scx.dsq_list.node; 451 else 452 list_node = &dsq->list; 453 454 /* find the next task, need to skip BPF iteration cursors */ 455 do { 456 if (rev) 457 list_node = list_node->prev; 458 else 459 list_node = list_node->next; 460 461 if (list_node == &dsq->list) 462 return NULL; 463 464 dsq_lnode = container_of(list_node, struct scx_dsq_list_node, 465 node); 466 } while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR); 467 468 return container_of(dsq_lnode, struct task_struct, scx.dsq_list); 469 } 470 471 #define nldsq_for_each_task(p, dsq) \ 472 for ((p) = nldsq_next_task((dsq), NULL, false); (p); \ 473 (p) = nldsq_next_task((dsq), (p), false)) 474 475 476 /* 477 * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse] 478 * dispatch order. BPF-visible iterator is opaque and larger to allow future 479 * changes without breaking backward compatibility. Can be used with 480 * bpf_for_each(). See bpf_iter_scx_dsq_*(). 481 */ 482 enum scx_dsq_iter_flags { 483 /* iterate in the reverse dispatch order */ 484 SCX_DSQ_ITER_REV = 1U << 16, 485 486 __SCX_DSQ_ITER_HAS_SLICE = 1U << 30, 487 __SCX_DSQ_ITER_HAS_VTIME = 1U << 31, 488 489 __SCX_DSQ_ITER_USER_FLAGS = SCX_DSQ_ITER_REV, 490 __SCX_DSQ_ITER_ALL_FLAGS = __SCX_DSQ_ITER_USER_FLAGS | 491 __SCX_DSQ_ITER_HAS_SLICE | 492 __SCX_DSQ_ITER_HAS_VTIME, 493 }; 494 495 struct bpf_iter_scx_dsq_kern { 496 struct scx_dsq_list_node cursor; 497 struct scx_dispatch_q *dsq; 498 u64 slice; 499 u64 vtime; 500 } __attribute__((aligned(8))); 501 502 struct bpf_iter_scx_dsq { 503 u64 __opaque[6]; 504 } __attribute__((aligned(8))); 505 506 507 /* 508 * SCX task iterator. 
509 */ 510 struct scx_task_iter { 511 struct sched_ext_entity cursor; 512 struct task_struct *locked_task; 513 struct rq *rq; 514 struct rq_flags rf; 515 u32 cnt; 516 bool list_locked; 517 }; 518 519 /** 520 * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration 521 * @iter: iterator to init 522 * 523 * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter 524 * must eventually be stopped with scx_task_iter_stop(). 525 * 526 * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock() 527 * between this and the first next() call or between any two next() calls. If 528 * the locks are released between two next() calls, the caller is responsible 529 * for ensuring that the task being iterated remains accessible either through 530 * RCU read lock or obtaining a reference count. 531 * 532 * All tasks which existed when the iteration started are guaranteed to be 533 * visited as long as they are not dead. 534 */ 535 static void scx_task_iter_start(struct scx_task_iter *iter) 536 { 537 memset(iter, 0, sizeof(*iter)); 538 539 raw_spin_lock_irq(&scx_tasks_lock); 540 541 iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR }; 542 list_add(&iter->cursor.tasks_node, &scx_tasks); 543 iter->list_locked = true; 544 } 545 546 static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter) 547 { 548 if (iter->locked_task) { 549 __balance_callbacks(iter->rq, &iter->rf); 550 task_rq_unlock(iter->rq, iter->locked_task, &iter->rf); 551 iter->locked_task = NULL; 552 } 553 } 554 555 /** 556 * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator 557 * @iter: iterator to unlock 558 * 559 * If @iter is in the middle of a locked iteration, it may be locking the rq of 560 * the task currently being visited in addition to scx_tasks_lock. Unlock both. 561 * This function can be safely called anytime during an iteration. The next 562 * iterator operation will automatically restore the necessary locking. 563 */ 564 static void scx_task_iter_unlock(struct scx_task_iter *iter) 565 { 566 __scx_task_iter_rq_unlock(iter); 567 if (iter->list_locked) { 568 iter->list_locked = false; 569 raw_spin_unlock_irq(&scx_tasks_lock); 570 } 571 } 572 573 static void __scx_task_iter_maybe_relock(struct scx_task_iter *iter) 574 { 575 if (!iter->list_locked) { 576 raw_spin_lock_irq(&scx_tasks_lock); 577 iter->list_locked = true; 578 } 579 } 580 581 /** 582 * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock 583 * @iter: iterator to exit 584 * 585 * Exit a previously initialized @iter. Must be called with scx_tasks_lock held 586 * which is released on return. If the iterator holds a task's rq lock, that rq 587 * lock is also released. See scx_task_iter_start() for details. 588 */ 589 static void scx_task_iter_stop(struct scx_task_iter *iter) 590 { 591 __scx_task_iter_maybe_relock(iter); 592 list_del_init(&iter->cursor.tasks_node); 593 scx_task_iter_unlock(iter); 594 } 595 596 /** 597 * scx_task_iter_next - Next task 598 * @iter: iterator to walk 599 * 600 * Visit the next task. See scx_task_iter_start() for details. Locks are dropped 601 * and re-acquired every %SCX_TASK_ITER_BATCH iterations to avoid causing stalls 602 * by holding scx_tasks_lock for too long. 
 */
static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
{
	struct list_head *cursor = &iter->cursor.tasks_node;
	struct sched_ext_entity *pos;

	if (!(++iter->cnt % SCX_TASK_ITER_BATCH)) {
		scx_task_iter_unlock(iter);
		cond_resched();
	}

	__scx_task_iter_maybe_relock(iter);

	list_for_each_entry(pos, cursor, tasks_node) {
		if (&pos->tasks_node == &scx_tasks)
			return NULL;
		if (!(pos->flags & SCX_TASK_CURSOR)) {
			list_move(cursor, &pos->tasks_node);
			return container_of(pos, struct task_struct, scx);
		}
	}

	/* can't happen, should always terminate at scx_tasks above */
	BUG();
}

/**
 * scx_task_iter_next_locked - Next non-idle task with its rq locked
 * @iter: iterator to walk
 *
 * Visit the next non-idle task with its rq lock held. See
 * scx_task_iter_start() for details.
 */
static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
{
	struct task_struct *p;

	__scx_task_iter_rq_unlock(iter);

	while ((p = scx_task_iter_next(iter))) {
		/*
		 * scx_task_iter is used to prepare and move tasks into SCX
		 * while loading the BPF scheduler and vice-versa while
		 * unloading. The init_tasks ("swappers") should be excluded
		 * from the iteration because:
		 *
		 * - It's unsafe to use __setscheduler_prio() on an init_task to
		 *   determine the sched_class to use as it won't preserve its
		 *   idle_sched_class.
		 *
		 * - ops.init/exit_task() can easily be confused if called with
		 *   init_tasks as they, e.g., share PID 0.
		 *
		 * As init_tasks are never scheduled through SCX, they can be
		 * skipped safely. Note that is_idle_task() which tests %PF_IDLE
		 * doesn't work here:
		 *
		 * - %PF_IDLE may not be set for an init_task whose CPU hasn't
		 *   yet been onlined.
		 *
		 * - %PF_IDLE can be set on tasks that are not init_tasks. See
		 *   play_idle_precise() used by CONFIG_IDLE_INJECT.
		 *
		 * Test for idle_sched_class as only init_tasks are on it.
		 */
		if (p->sched_class != &idle_sched_class)
			break;
	}
	if (!p)
		return NULL;

	iter->rq = task_rq_lock(p, &iter->rf);
	iter->locked_task = p;

	return p;
}
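
/*
 * Illustrative sketch only (do_something() is a made-up placeholder, not an
 * existing helper): the iterator above is meant to be used roughly as
 *
 *	struct scx_task_iter sti;
 *	struct task_struct *p;
 *
 *	scx_task_iter_start(&sti);
 *	while ((p = scx_task_iter_next_locked(&sti)))
 *		do_something(p);	// @p's rq is locked here
 *	scx_task_iter_stop(&sti);
 *
 * with scx_task_iter_unlock() available to temporarily drop the locks between
 * visits.
 */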

/**
 * scx_add_event - Increase an event counter for 'name' by 'cnt'
 * @sch: scx_sched to account events for
 * @name: an event name defined in struct scx_event_stats
 * @cnt: the number of times the event occurred
 *
 * This can be used when preemption is not disabled.
 */
#define scx_add_event(sch, name, cnt) do {					\
	this_cpu_add((sch)->pcpu->event_stats.name, (cnt));			\
	trace_sched_ext_event(#name, (cnt));					\
} while(0)

/**
 * __scx_add_event - Increase an event counter for 'name' by 'cnt'
 * @sch: scx_sched to account events for
 * @name: an event name defined in struct scx_event_stats
 * @cnt: the number of times the event occurred
 *
 * This should be used only when preemption is disabled.
 */
#define __scx_add_event(sch, name, cnt) do {					\
	__this_cpu_add((sch)->pcpu->event_stats.name, (cnt));			\
	trace_sched_ext_event(#name, cnt);					\
} while(0)

/**
 * scx_agg_event - Aggregate an event counter 'kind' from 'src_e' to 'dst_e'
 * @dst_e: destination event stats
 * @src_e: source event stats
 * @kind: a kind of event to be aggregated
 */
#define scx_agg_event(dst_e, src_e, kind) do {					\
	(dst_e)->kind += READ_ONCE((src_e)->kind);				\
} while(0)

/**
 * scx_dump_event - Dump an event 'kind' in 'events' to 's'
 * @s: output seq_buf
 * @events: event stats
 * @kind: a kind of event to dump
 */
#define scx_dump_event(s, events, kind) do {					\
	dump_line(&(s), "%40s: %16lld", #kind, (events)->kind);		\
} while (0)


static void scx_read_events(struct scx_sched *sch,
			    struct scx_event_stats *events);

static enum scx_enable_state scx_enable_state(void)
{
	return atomic_read(&scx_enable_state_var);
}

static enum scx_enable_state scx_set_enable_state(enum scx_enable_state to)
{
	return atomic_xchg(&scx_enable_state_var, to);
}

static bool scx_tryset_enable_state(enum scx_enable_state to,
				    enum scx_enable_state from)
{
	int from_v = from;

	return atomic_try_cmpxchg(&scx_enable_state_var, &from_v, to);
}

/**
 * wait_ops_state - Busy-wait for the specified ops state to end
 * @p: target task
 * @opss: state to wait the end of
 *
 * Busy-wait for @p to transition out of @opss. This can only be used when the
 * state part of @opss is %SCX_QUEUEING or %SCX_DISPATCHING. This function also
 * has load_acquire semantics to ensure that the caller can see the updates made
 * in the enqueueing and dispatching paths.
 */
static void wait_ops_state(struct task_struct *p, unsigned long opss)
{
	do {
		cpu_relax();
	} while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
}

static inline bool __cpu_valid(s32 cpu)
{
	return likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu));
}

/**
 * ops_cpu_valid - Verify a cpu number, to be used on ops input args
 * @sch: scx_sched to abort on error
 * @cpu: cpu number which came from a BPF ops
 * @where: extra information reported on error
 *
 * @cpu is a cpu number which came from the BPF scheduler and can be any value.
 * Verify that it is in range and one of the possible cpus. If invalid, trigger
 * an ops error.
 */
static bool ops_cpu_valid(struct scx_sched *sch, s32 cpu, const char *where)
{
	if (__cpu_valid(cpu)) {
		return true;
	} else {
		scx_error(sch, "invalid CPU %d%s%s", cpu, where ? " " : "", where ?: "");
		return false;
	}
}
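
/*
 * Illustrative sketch only: a kfunc or ops callback that receives a CPU number
 * from the BPF scheduler is expected to validate it before use, roughly as
 *
 *	if (!ops_cpu_valid(sch, cpu, " in dispatch verdict"))
 *		return;		// scx_error() has already been raised
 *	rq = cpu_rq(cpu);
 *
 * The "where" string and the surrounding code are made up for illustration.
 */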

/**
 * ops_sanitize_err - Sanitize a -errno value
 * @sch: scx_sched to error out on error
 * @ops_name: operation to blame on failure
 * @err: -errno value to sanitize
 *
 * Verify @err is a valid -errno. If not, trigger scx_error() and return
 * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
 * cause misbehavior. For example, a large negative return from ops.init_task()
 * triggers an oops when passed up the call chain because the value fails the
 * IS_ERR() test after being encoded with ERR_PTR() and then is handled as a
 * pointer.
 */
static int ops_sanitize_err(struct scx_sched *sch, const char *ops_name, s32 err)
{
	if (err < 0 && err >= -MAX_ERRNO)
		return err;

	scx_error(sch, "ops.%s() returned an invalid errno %d", ops_name, err);
	return -EPROTO;
}

static void run_deferred(struct rq *rq)
{
	process_ddsp_deferred_locals(rq);

	if (local_read(&rq->scx.reenq_local_deferred)) {
		local_set(&rq->scx.reenq_local_deferred, 0);
		reenq_local(rq);
	}
}

static void deferred_bal_cb_workfn(struct rq *rq)
{
	run_deferred(rq);
}

static void deferred_irq_workfn(struct irq_work *irq_work)
{
	struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);

	raw_spin_rq_lock(rq);
	run_deferred(rq);
	raw_spin_rq_unlock(rq);
}

/**
 * schedule_deferred - Schedule execution of deferred actions on an rq
 * @rq: target rq
 *
 * Schedule execution of deferred actions on @rq. Deferred actions are executed
 * with @rq locked but unpinned, and thus can unlock @rq to e.g. migrate tasks
 * to other rqs.
 */
static void schedule_deferred(struct rq *rq)
{
	/*
	 * Queue an irq work. It is executed on IRQ re-enable, which may take a
	 * bit longer than the scheduler hooks used by schedule_deferred_locked().
	 */
	irq_work_queue(&rq->scx.deferred_irq_work);
}

/**
 * schedule_deferred_locked - Schedule execution of deferred actions on an rq
 * @rq: target rq
 *
 * Schedule execution of deferred actions on @rq. Equivalent to
 * schedule_deferred() but requires @rq to be locked and can be more efficient.
 */
static void schedule_deferred_locked(struct rq *rq)
{
	lockdep_assert_rq_held(rq);

	/*
	 * If in the middle of waking up a task, task_woken_scx() will be called
	 * afterwards which will then run the deferred actions, no need to
	 * schedule anything.
	 */
	if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
		return;

	/* Don't do anything if there already is a deferred operation. */
	if (rq->scx.flags & SCX_RQ_BAL_CB_PENDING)
		return;

	/*
	 * If in balance, the balance callbacks will be called before the rq
	 * lock is released. Schedule one.
	 *
	 * We can't directly insert the callback into the rq's list: the
	 * callback can drop the rq lock and make the pending balance callback
	 * visible to unrelated code paths that call rq_pin_lock().
	 *
	 * Just let balance_one() know that it must do it itself.
	 */
	if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
		rq->scx.flags |= SCX_RQ_BAL_CB_PENDING;
		return;
	}

	/*
	 * No scheduler hooks available. Use the generic irq_work path. The
	 * above WAKEUP and BALANCE paths should cover most of the cases and the
	 * time to IRQ re-enable shouldn't be long.
	 */
	schedule_deferred(rq);
}

/**
 * touch_core_sched - Update timestamp used for core-sched task ordering
 * @rq: rq to read clock from, must be locked
 * @p: task to update the timestamp for
 *
 * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
 * implement global or local-DSQ FIFO ordering for core-sched. Should be called
 * when a task becomes runnable and its turn on the CPU ends (e.g. slice
 * exhaustion).
 */
static void touch_core_sched(struct rq *rq, struct task_struct *p)
{
	lockdep_assert_rq_held(rq);

#ifdef CONFIG_SCHED_CORE
	/*
	 * It's okay to update the timestamp spuriously.
Use 918 * sched_core_disabled() which is cheaper than enabled(). 919 * 920 * As this is used to determine ordering between tasks of sibling CPUs, 921 * it may be better to use per-core dispatch sequence instead. 922 */ 923 if (!sched_core_disabled()) 924 p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq)); 925 #endif 926 } 927 928 /** 929 * touch_core_sched_dispatch - Update core-sched timestamp on dispatch 930 * @rq: rq to read clock from, must be locked 931 * @p: task being dispatched 932 * 933 * If the BPF scheduler implements custom core-sched ordering via 934 * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO 935 * ordering within each local DSQ. This function is called from dispatch paths 936 * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect. 937 */ 938 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p) 939 { 940 lockdep_assert_rq_held(rq); 941 942 #ifdef CONFIG_SCHED_CORE 943 if (unlikely(SCX_HAS_OP(scx_root, core_sched_before))) 944 touch_core_sched(rq, p); 945 #endif 946 } 947 948 static void update_curr_scx(struct rq *rq) 949 { 950 struct task_struct *curr = rq->curr; 951 s64 delta_exec; 952 953 delta_exec = update_curr_common(rq); 954 if (unlikely(delta_exec <= 0)) 955 return; 956 957 if (curr->scx.slice != SCX_SLICE_INF) { 958 curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec); 959 if (!curr->scx.slice) 960 touch_core_sched(rq, curr); 961 } 962 963 dl_server_update(&rq->ext_server, delta_exec); 964 } 965 966 static bool scx_dsq_priq_less(struct rb_node *node_a, 967 const struct rb_node *node_b) 968 { 969 const struct task_struct *a = 970 container_of(node_a, struct task_struct, scx.dsq_priq); 971 const struct task_struct *b = 972 container_of(node_b, struct task_struct, scx.dsq_priq); 973 974 return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime); 975 } 976 977 static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta) 978 { 979 /* 980 * scx_bpf_dsq_nr_queued() reads ->nr without locking. Use READ_ONCE() 981 * on the read side and WRITE_ONCE() on the write side to properly 982 * annotate the concurrent lockless access and avoid KCSAN warnings. 983 */ 984 WRITE_ONCE(dsq->nr, READ_ONCE(dsq->nr) + delta); 985 } 986 987 static void refill_task_slice_dfl(struct scx_sched *sch, struct task_struct *p) 988 { 989 p->scx.slice = READ_ONCE(scx_slice_dfl); 990 __scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1); 991 } 992 993 static void local_dsq_post_enq(struct scx_dispatch_q *dsq, struct task_struct *p, 994 u64 enq_flags) 995 { 996 struct rq *rq = container_of(dsq, struct rq, scx.local_dsq); 997 bool preempt = false; 998 999 /* 1000 * If @rq is in balance, the CPU is already vacant and looking for the 1001 * next task to run. No need to preempt or trigger resched after moving 1002 * @p into its local DSQ. 
1003 */ 1004 if (rq->scx.flags & SCX_RQ_IN_BALANCE) 1005 return; 1006 1007 if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr && 1008 rq->curr->sched_class == &ext_sched_class) { 1009 rq->curr->scx.slice = 0; 1010 preempt = true; 1011 } 1012 1013 if (preempt || sched_class_above(&ext_sched_class, rq->curr->sched_class)) 1014 resched_curr(rq); 1015 } 1016 1017 static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq, 1018 struct task_struct *p, u64 enq_flags) 1019 { 1020 bool is_local = dsq->id == SCX_DSQ_LOCAL; 1021 1022 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node)); 1023 WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) || 1024 !RB_EMPTY_NODE(&p->scx.dsq_priq)); 1025 1026 if (!is_local) { 1027 raw_spin_lock_nested(&dsq->lock, 1028 (enq_flags & SCX_ENQ_NESTED) ? SINGLE_DEPTH_NESTING : 0); 1029 1030 if (unlikely(dsq->id == SCX_DSQ_INVALID)) { 1031 scx_error(sch, "attempting to dispatch to a destroyed dsq"); 1032 /* fall back to the global dsq */ 1033 raw_spin_unlock(&dsq->lock); 1034 dsq = find_global_dsq(sch, p); 1035 raw_spin_lock(&dsq->lock); 1036 } 1037 } 1038 1039 if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) && 1040 (enq_flags & SCX_ENQ_DSQ_PRIQ))) { 1041 /* 1042 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from 1043 * their FIFO queues. To avoid confusion and accidentally 1044 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we 1045 * disallow any internal DSQ from doing vtime ordering of 1046 * tasks. 1047 */ 1048 scx_error(sch, "cannot use vtime ordering for built-in DSQs"); 1049 enq_flags &= ~SCX_ENQ_DSQ_PRIQ; 1050 } 1051 1052 if (enq_flags & SCX_ENQ_DSQ_PRIQ) { 1053 struct rb_node *rbp; 1054 1055 /* 1056 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are 1057 * linked to both the rbtree and list on PRIQs, this can only be 1058 * tested easily when adding the first task. 1059 */ 1060 if (unlikely(RB_EMPTY_ROOT(&dsq->priq) && 1061 nldsq_next_task(dsq, NULL, false))) 1062 scx_error(sch, "DSQ ID 0x%016llx already had FIFO-enqueued tasks", 1063 dsq->id); 1064 1065 p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ; 1066 rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less); 1067 1068 /* 1069 * Find the previous task and insert after it on the list so 1070 * that @dsq->list is vtime ordered. 
1071 */ 1072 rbp = rb_prev(&p->scx.dsq_priq); 1073 if (rbp) { 1074 struct task_struct *prev = 1075 container_of(rbp, struct task_struct, 1076 scx.dsq_priq); 1077 list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node); 1078 /* first task unchanged - no update needed */ 1079 } else { 1080 list_add(&p->scx.dsq_list.node, &dsq->list); 1081 /* not builtin and new task is at head - use fastpath */ 1082 rcu_assign_pointer(dsq->first_task, p); 1083 } 1084 } else { 1085 /* a FIFO DSQ shouldn't be using PRIQ enqueuing */ 1086 if (unlikely(!RB_EMPTY_ROOT(&dsq->priq))) 1087 scx_error(sch, "DSQ ID 0x%016llx already had PRIQ-enqueued tasks", 1088 dsq->id); 1089 1090 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) { 1091 list_add(&p->scx.dsq_list.node, &dsq->list); 1092 /* new task inserted at head - use fastpath */ 1093 if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN)) 1094 rcu_assign_pointer(dsq->first_task, p); 1095 } else { 1096 bool was_empty; 1097 1098 was_empty = list_empty(&dsq->list); 1099 list_add_tail(&p->scx.dsq_list.node, &dsq->list); 1100 if (was_empty && !(dsq->id & SCX_DSQ_FLAG_BUILTIN)) 1101 rcu_assign_pointer(dsq->first_task, p); 1102 } 1103 } 1104 1105 /* seq records the order tasks are queued, used by BPF DSQ iterator */ 1106 dsq->seq++; 1107 p->scx.dsq_seq = dsq->seq; 1108 1109 dsq_mod_nr(dsq, 1); 1110 p->scx.dsq = dsq; 1111 1112 /* 1113 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the 1114 * direct dispatch path, but we clear them here because the direct 1115 * dispatch verdict may be overridden on the enqueue path during e.g. 1116 * bypass. 1117 */ 1118 p->scx.ddsp_dsq_id = SCX_DSQ_INVALID; 1119 p->scx.ddsp_enq_flags = 0; 1120 1121 /* 1122 * We're transitioning out of QUEUEING or DISPATCHING. store_release to 1123 * match waiters' load_acquire. 1124 */ 1125 if (enq_flags & SCX_ENQ_CLEAR_OPSS) 1126 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); 1127 1128 if (is_local) 1129 local_dsq_post_enq(dsq, p, enq_flags); 1130 else 1131 raw_spin_unlock(&dsq->lock); 1132 } 1133 1134 static void task_unlink_from_dsq(struct task_struct *p, 1135 struct scx_dispatch_q *dsq) 1136 { 1137 WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node)); 1138 1139 if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) { 1140 rb_erase(&p->scx.dsq_priq, &dsq->priq); 1141 RB_CLEAR_NODE(&p->scx.dsq_priq); 1142 p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ; 1143 } 1144 1145 list_del_init(&p->scx.dsq_list.node); 1146 dsq_mod_nr(dsq, -1); 1147 1148 if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN) && dsq->first_task == p) { 1149 struct task_struct *first_task; 1150 1151 first_task = nldsq_next_task(dsq, NULL, false); 1152 rcu_assign_pointer(dsq->first_task, first_task); 1153 } 1154 } 1155 1156 static void dispatch_dequeue(struct rq *rq, struct task_struct *p) 1157 { 1158 struct scx_dispatch_q *dsq = p->scx.dsq; 1159 bool is_local = dsq == &rq->scx.local_dsq; 1160 1161 lockdep_assert_rq_held(rq); 1162 1163 if (!dsq) { 1164 /* 1165 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals. 1166 * Unlinking is all that's needed to cancel. 1167 */ 1168 if (unlikely(!list_empty(&p->scx.dsq_list.node))) 1169 list_del_init(&p->scx.dsq_list.node); 1170 1171 /* 1172 * When dispatching directly from the BPF scheduler to a local 1173 * DSQ, the task isn't associated with any DSQ but 1174 * @p->scx.holding_cpu may be set under the protection of 1175 * %SCX_OPSS_DISPATCHING. 
1176 */ 1177 if (p->scx.holding_cpu >= 0) 1178 p->scx.holding_cpu = -1; 1179 1180 return; 1181 } 1182 1183 if (!is_local) 1184 raw_spin_lock(&dsq->lock); 1185 1186 /* 1187 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't 1188 * change underneath us. 1189 */ 1190 if (p->scx.holding_cpu < 0) { 1191 /* @p must still be on @dsq, dequeue */ 1192 task_unlink_from_dsq(p, dsq); 1193 } else { 1194 /* 1195 * We're racing against dispatch_to_local_dsq() which already 1196 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the 1197 * holding_cpu which tells dispatch_to_local_dsq() that it lost 1198 * the race. 1199 */ 1200 WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node)); 1201 p->scx.holding_cpu = -1; 1202 } 1203 p->scx.dsq = NULL; 1204 1205 if (!is_local) 1206 raw_spin_unlock(&dsq->lock); 1207 } 1208 1209 /* 1210 * Abbreviated version of dispatch_dequeue() that can be used when both @p's rq 1211 * and dsq are locked. 1212 */ 1213 static void dispatch_dequeue_locked(struct task_struct *p, 1214 struct scx_dispatch_q *dsq) 1215 { 1216 lockdep_assert_rq_held(task_rq(p)); 1217 lockdep_assert_held(&dsq->lock); 1218 1219 task_unlink_from_dsq(p, dsq); 1220 p->scx.dsq = NULL; 1221 } 1222 1223 static struct scx_dispatch_q *find_dsq_for_dispatch(struct scx_sched *sch, 1224 struct rq *rq, u64 dsq_id, 1225 struct task_struct *p) 1226 { 1227 struct scx_dispatch_q *dsq; 1228 1229 if (dsq_id == SCX_DSQ_LOCAL) 1230 return &rq->scx.local_dsq; 1231 1232 if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) { 1233 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK; 1234 1235 if (!ops_cpu_valid(sch, cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict")) 1236 return find_global_dsq(sch, p); 1237 1238 return &cpu_rq(cpu)->scx.local_dsq; 1239 } 1240 1241 if (dsq_id == SCX_DSQ_GLOBAL) 1242 dsq = find_global_dsq(sch, p); 1243 else 1244 dsq = find_user_dsq(sch, dsq_id); 1245 1246 if (unlikely(!dsq)) { 1247 scx_error(sch, "non-existent DSQ 0x%llx for %s[%d]", 1248 dsq_id, p->comm, p->pid); 1249 return find_global_dsq(sch, p); 1250 } 1251 1252 return dsq; 1253 } 1254 1255 static void mark_direct_dispatch(struct scx_sched *sch, 1256 struct task_struct *ddsp_task, 1257 struct task_struct *p, u64 dsq_id, 1258 u64 enq_flags) 1259 { 1260 /* 1261 * Mark that dispatch already happened from ops.select_cpu() or 1262 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value 1263 * which can never match a valid task pointer. 
1264 */ 1265 __this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH)); 1266 1267 /* @p must match the task on the enqueue path */ 1268 if (unlikely(p != ddsp_task)) { 1269 if (IS_ERR(ddsp_task)) 1270 scx_error(sch, "%s[%d] already direct-dispatched", 1271 p->comm, p->pid); 1272 else 1273 scx_error(sch, "scheduling for %s[%d] but trying to direct-dispatch %s[%d]", 1274 ddsp_task->comm, ddsp_task->pid, 1275 p->comm, p->pid); 1276 return; 1277 } 1278 1279 WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID); 1280 WARN_ON_ONCE(p->scx.ddsp_enq_flags); 1281 1282 p->scx.ddsp_dsq_id = dsq_id; 1283 p->scx.ddsp_enq_flags = enq_flags; 1284 } 1285 1286 static void direct_dispatch(struct scx_sched *sch, struct task_struct *p, 1287 u64 enq_flags) 1288 { 1289 struct rq *rq = task_rq(p); 1290 struct scx_dispatch_q *dsq = 1291 find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p); 1292 1293 touch_core_sched_dispatch(rq, p); 1294 1295 p->scx.ddsp_enq_flags |= enq_flags; 1296 1297 /* 1298 * We are in the enqueue path with @rq locked and pinned, and thus can't 1299 * double lock a remote rq and enqueue to its local DSQ. For 1300 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer 1301 * the enqueue so that it's executed when @rq can be unlocked. 1302 */ 1303 if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) { 1304 unsigned long opss; 1305 1306 opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK; 1307 1308 switch (opss & SCX_OPSS_STATE_MASK) { 1309 case SCX_OPSS_NONE: 1310 break; 1311 case SCX_OPSS_QUEUEING: 1312 /* 1313 * As @p was never passed to the BPF side, _release is 1314 * not strictly necessary. Still do it for consistency. 1315 */ 1316 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); 1317 break; 1318 default: 1319 WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()", 1320 p->comm, p->pid, opss); 1321 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); 1322 break; 1323 } 1324 1325 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node)); 1326 list_add_tail(&p->scx.dsq_list.node, 1327 &rq->scx.ddsp_deferred_locals); 1328 schedule_deferred_locked(rq); 1329 return; 1330 } 1331 1332 dispatch_enqueue(sch, dsq, p, 1333 p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS); 1334 } 1335 1336 static bool scx_rq_online(struct rq *rq) 1337 { 1338 /* 1339 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates 1340 * the online state as seen from the BPF scheduler. cpu_active() test 1341 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will 1342 * stay set until the current scheduling operation is complete even if 1343 * we aren't locking @rq. 1344 */ 1345 return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq))); 1346 } 1347 1348 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags, 1349 int sticky_cpu) 1350 { 1351 struct scx_sched *sch = scx_root; 1352 struct task_struct **ddsp_taskp; 1353 struct scx_dispatch_q *dsq; 1354 unsigned long qseq; 1355 1356 WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED)); 1357 1358 /* rq migration */ 1359 if (sticky_cpu == cpu_of(rq)) 1360 goto local_norefill; 1361 1362 /* 1363 * If !scx_rq_online(), we already told the BPF scheduler that the CPU 1364 * is offline and are just running the hotplug path. Don't bother the 1365 * BPF scheduler. 
1366 */ 1367 if (!scx_rq_online(rq)) 1368 goto local; 1369 1370 if (scx_rq_bypassing(rq)) { 1371 __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1); 1372 goto bypass; 1373 } 1374 1375 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID) 1376 goto direct; 1377 1378 /* see %SCX_OPS_ENQ_EXITING */ 1379 if (!(sch->ops.flags & SCX_OPS_ENQ_EXITING) && 1380 unlikely(p->flags & PF_EXITING)) { 1381 __scx_add_event(sch, SCX_EV_ENQ_SKIP_EXITING, 1); 1382 goto local; 1383 } 1384 1385 /* see %SCX_OPS_ENQ_MIGRATION_DISABLED */ 1386 if (!(sch->ops.flags & SCX_OPS_ENQ_MIGRATION_DISABLED) && 1387 is_migration_disabled(p)) { 1388 __scx_add_event(sch, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED, 1); 1389 goto local; 1390 } 1391 1392 if (unlikely(!SCX_HAS_OP(sch, enqueue))) 1393 goto global; 1394 1395 /* DSQ bypass didn't trigger, enqueue on the BPF scheduler */ 1396 qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT; 1397 1398 WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE); 1399 atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq); 1400 1401 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task); 1402 WARN_ON_ONCE(*ddsp_taskp); 1403 *ddsp_taskp = p; 1404 1405 SCX_CALL_OP_TASK(sch, SCX_KF_ENQUEUE, enqueue, rq, p, enq_flags); 1406 1407 *ddsp_taskp = NULL; 1408 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID) 1409 goto direct; 1410 1411 /* 1412 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or 1413 * dequeue may be waiting. The store_release matches their load_acquire. 1414 */ 1415 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq); 1416 return; 1417 1418 direct: 1419 direct_dispatch(sch, p, enq_flags); 1420 return; 1421 local_norefill: 1422 dispatch_enqueue(sch, &rq->scx.local_dsq, p, enq_flags); 1423 return; 1424 local: 1425 dsq = &rq->scx.local_dsq; 1426 goto enqueue; 1427 global: 1428 dsq = find_global_dsq(sch, p); 1429 goto enqueue; 1430 bypass: 1431 dsq = &task_rq(p)->scx.bypass_dsq; 1432 goto enqueue; 1433 1434 enqueue: 1435 /* 1436 * For task-ordering, slice refill must be treated as implying the end 1437 * of the current slice. Otherwise, the longer @p stays on the CPU, the 1438 * higher priority it becomes from scx_prio_less()'s POV. 1439 */ 1440 touch_core_sched(rq, p); 1441 refill_task_slice_dfl(sch, p); 1442 dispatch_enqueue(sch, dsq, p, enq_flags); 1443 } 1444 1445 static bool task_runnable(const struct task_struct *p) 1446 { 1447 return !list_empty(&p->scx.runnable_node); 1448 } 1449 1450 static void set_task_runnable(struct rq *rq, struct task_struct *p) 1451 { 1452 lockdep_assert_rq_held(rq); 1453 1454 if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) { 1455 p->scx.runnable_at = jiffies; 1456 p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT; 1457 } 1458 1459 /* 1460 * list_add_tail() must be used. scx_bypass() depends on tasks being 1461 * appended to the runnable_list. 
1462 */ 1463 list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list); 1464 } 1465 1466 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at) 1467 { 1468 list_del_init(&p->scx.runnable_node); 1469 if (reset_runnable_at) 1470 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT; 1471 } 1472 1473 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags) 1474 { 1475 struct scx_sched *sch = scx_root; 1476 int sticky_cpu = p->scx.sticky_cpu; 1477 1478 if (enq_flags & ENQUEUE_WAKEUP) 1479 rq->scx.flags |= SCX_RQ_IN_WAKEUP; 1480 1481 enq_flags |= rq->scx.extra_enq_flags; 1482 1483 if (sticky_cpu >= 0) 1484 p->scx.sticky_cpu = -1; 1485 1486 /* 1487 * Restoring a running task will be immediately followed by 1488 * set_next_task_scx() which expects the task to not be on the BPF 1489 * scheduler as tasks can only start running through local DSQs. Force 1490 * direct-dispatch into the local DSQ by setting the sticky_cpu. 1491 */ 1492 if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p)) 1493 sticky_cpu = cpu_of(rq); 1494 1495 if (p->scx.flags & SCX_TASK_QUEUED) { 1496 WARN_ON_ONCE(!task_runnable(p)); 1497 goto out; 1498 } 1499 1500 set_task_runnable(rq, p); 1501 p->scx.flags |= SCX_TASK_QUEUED; 1502 rq->scx.nr_running++; 1503 add_nr_running(rq, 1); 1504 1505 if (SCX_HAS_OP(sch, runnable) && !task_on_rq_migrating(p)) 1506 SCX_CALL_OP_TASK(sch, SCX_KF_REST, runnable, rq, p, enq_flags); 1507 1508 if (enq_flags & SCX_ENQ_WAKEUP) 1509 touch_core_sched(rq, p); 1510 1511 /* Start dl_server if this is the first task being enqueued */ 1512 if (rq->scx.nr_running == 1) 1513 dl_server_start(&rq->ext_server); 1514 1515 do_enqueue_task(rq, p, enq_flags, sticky_cpu); 1516 out: 1517 rq->scx.flags &= ~SCX_RQ_IN_WAKEUP; 1518 1519 if ((enq_flags & SCX_ENQ_CPU_SELECTED) && 1520 unlikely(cpu_of(rq) != p->scx.selected_cpu)) 1521 __scx_add_event(sch, SCX_EV_SELECT_CPU_FALLBACK, 1); 1522 } 1523 1524 static void ops_dequeue(struct rq *rq, struct task_struct *p, u64 deq_flags) 1525 { 1526 struct scx_sched *sch = scx_root; 1527 unsigned long opss; 1528 1529 /* dequeue is always temporary, don't reset runnable_at */ 1530 clr_task_runnable(p, false); 1531 1532 /* acquire ensures that we see the preceding updates on QUEUED */ 1533 opss = atomic_long_read_acquire(&p->scx.ops_state); 1534 1535 switch (opss & SCX_OPSS_STATE_MASK) { 1536 case SCX_OPSS_NONE: 1537 break; 1538 case SCX_OPSS_QUEUEING: 1539 /* 1540 * QUEUEING is started and finished while holding @p's rq lock. 1541 * As we're holding the rq lock now, we shouldn't see QUEUEING. 1542 */ 1543 BUG(); 1544 case SCX_OPSS_QUEUED: 1545 if (SCX_HAS_OP(sch, dequeue)) 1546 SCX_CALL_OP_TASK(sch, SCX_KF_REST, dequeue, rq, 1547 p, deq_flags); 1548 1549 if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss, 1550 SCX_OPSS_NONE)) 1551 break; 1552 fallthrough; 1553 case SCX_OPSS_DISPATCHING: 1554 /* 1555 * If @p is being dispatched from the BPF scheduler to a DSQ, 1556 * wait for the transfer to complete so that @p doesn't get 1557 * added to its DSQ after dequeueing is complete. 1558 * 1559 * As we're waiting on DISPATCHING with the rq locked, the 1560 * dispatching side shouldn't try to lock the rq while 1561 * DISPATCHING is set. See dispatch_to_local_dsq(). 1562 * 1563 * DISPATCHING shouldn't have qseq set and control can reach 1564 * here with NONE @opss from the above QUEUED case block. 1565 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss. 
1566 */ 1567 wait_ops_state(p, SCX_OPSS_DISPATCHING); 1568 BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE); 1569 break; 1570 } 1571 } 1572 1573 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags) 1574 { 1575 struct scx_sched *sch = scx_root; 1576 1577 if (!(p->scx.flags & SCX_TASK_QUEUED)) { 1578 WARN_ON_ONCE(task_runnable(p)); 1579 return true; 1580 } 1581 1582 ops_dequeue(rq, p, deq_flags); 1583 1584 /* 1585 * A currently running task which is going off @rq first gets dequeued 1586 * and then stops running. As we want running <-> stopping transitions 1587 * to be contained within runnable <-> quiescent transitions, trigger 1588 * ->stopping() early here instead of in put_prev_task_scx(). 1589 * 1590 * @p may go through multiple stopping <-> running transitions between 1591 * here and put_prev_task_scx() if task attribute changes occur while 1592 * balance_one() leaves @rq unlocked. However, they don't contain any 1593 * information meaningful to the BPF scheduler and can be suppressed by 1594 * skipping the callbacks if the task is !QUEUED. 1595 */ 1596 if (SCX_HAS_OP(sch, stopping) && task_current(rq, p)) { 1597 update_curr_scx(rq); 1598 SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, false); 1599 } 1600 1601 if (SCX_HAS_OP(sch, quiescent) && !task_on_rq_migrating(p)) 1602 SCX_CALL_OP_TASK(sch, SCX_KF_REST, quiescent, rq, p, deq_flags); 1603 1604 if (deq_flags & SCX_DEQ_SLEEP) 1605 p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP; 1606 else 1607 p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP; 1608 1609 p->scx.flags &= ~SCX_TASK_QUEUED; 1610 rq->scx.nr_running--; 1611 sub_nr_running(rq, 1); 1612 1613 dispatch_dequeue(rq, p); 1614 return true; 1615 } 1616 1617 static void yield_task_scx(struct rq *rq) 1618 { 1619 struct scx_sched *sch = scx_root; 1620 struct task_struct *p = rq->donor; 1621 1622 if (SCX_HAS_OP(sch, yield)) 1623 SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq, p, NULL); 1624 else 1625 p->scx.slice = 0; 1626 } 1627 1628 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to) 1629 { 1630 struct scx_sched *sch = scx_root; 1631 struct task_struct *from = rq->donor; 1632 1633 if (SCX_HAS_OP(sch, yield)) 1634 return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq, 1635 from, to); 1636 else 1637 return false; 1638 } 1639 1640 static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags, 1641 struct scx_dispatch_q *src_dsq, 1642 struct rq *dst_rq) 1643 { 1644 struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq; 1645 1646 /* @dsq is locked and @p is on @dst_rq */ 1647 lockdep_assert_held(&src_dsq->lock); 1648 lockdep_assert_rq_held(dst_rq); 1649 1650 WARN_ON_ONCE(p->scx.holding_cpu >= 0); 1651 1652 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) 1653 list_add(&p->scx.dsq_list.node, &dst_dsq->list); 1654 else 1655 list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list); 1656 1657 dsq_mod_nr(dst_dsq, 1); 1658 p->scx.dsq = dst_dsq; 1659 1660 local_dsq_post_enq(dst_dsq, p, enq_flags); 1661 } 1662 1663 /** 1664 * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ 1665 * @p: task to move 1666 * @enq_flags: %SCX_ENQ_* 1667 * @src_rq: rq to move the task from, locked on entry, released on return 1668 * @dst_rq: rq to move the task into, locked on return 1669 * 1670 * Move @p which is currently on @src_rq to @dst_rq's local DSQ. 
1671 */ 1672 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, 1673 struct rq *src_rq, struct rq *dst_rq) 1674 { 1675 lockdep_assert_rq_held(src_rq); 1676 1677 /* the following marks @p MIGRATING which excludes dequeue */ 1678 deactivate_task(src_rq, p, 0); 1679 set_task_cpu(p, cpu_of(dst_rq)); 1680 p->scx.sticky_cpu = cpu_of(dst_rq); 1681 1682 raw_spin_rq_unlock(src_rq); 1683 raw_spin_rq_lock(dst_rq); 1684 1685 /* 1686 * We want to pass scx-specific enq_flags but activate_task() will 1687 * truncate the upper 32 bit. As we own @rq, we can pass them through 1688 * @rq->scx.extra_enq_flags instead. 1689 */ 1690 WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr)); 1691 WARN_ON_ONCE(dst_rq->scx.extra_enq_flags); 1692 dst_rq->scx.extra_enq_flags = enq_flags; 1693 activate_task(dst_rq, p, 0); 1694 dst_rq->scx.extra_enq_flags = 0; 1695 } 1696 1697 /* 1698 * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two 1699 * differences: 1700 * 1701 * - is_cpu_allowed() asks "Can this task run on this CPU?" while 1702 * task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to 1703 * this CPU?". 1704 * 1705 * While migration is disabled, is_cpu_allowed() has to say "yes" as the task 1706 * must be allowed to finish on the CPU that it's currently on regardless of 1707 * the CPU state. However, task_can_run_on_remote_rq() must say "no" as the 1708 * BPF scheduler shouldn't attempt to migrate a task which has migration 1709 * disabled. 1710 * 1711 * - The BPF scheduler is bypassed while the rq is offline and we can always say 1712 * no to the BPF scheduler initiated migrations while offline. 1713 * 1714 * The caller must ensure that @p and @rq are on different CPUs. 1715 */ 1716 static bool task_can_run_on_remote_rq(struct scx_sched *sch, 1717 struct task_struct *p, struct rq *rq, 1718 bool enforce) 1719 { 1720 int cpu = cpu_of(rq); 1721 1722 WARN_ON_ONCE(task_cpu(p) == cpu); 1723 1724 /* 1725 * If @p has migration disabled, @p->cpus_ptr is updated to contain only 1726 * the pinned CPU in migrate_disable_switch() while @p is being switched 1727 * out. However, put_prev_task_scx() is called before @p->cpus_ptr is 1728 * updated and thus another CPU may see @p on a DSQ inbetween leading to 1729 * @p passing the below task_allowed_on_cpu() check while migration is 1730 * disabled. 1731 * 1732 * Test the migration disabled state first as the race window is narrow 1733 * and the BPF scheduler failing to check migration disabled state can 1734 * easily be masked if task_allowed_on_cpu() is done first. 1735 */ 1736 if (unlikely(is_migration_disabled(p))) { 1737 if (enforce) 1738 scx_error(sch, "SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d", 1739 p->comm, p->pid, task_cpu(p), cpu); 1740 return false; 1741 } 1742 1743 /* 1744 * We don't require the BPF scheduler to avoid dispatching to offline 1745 * CPUs mostly for convenience but also because CPUs can go offline 1746 * between scx_bpf_dsq_insert() calls and here. Trigger error iff the 1747 * picked CPU is outside the allowed mask. 
1748 */ 1749 if (!task_allowed_on_cpu(p, cpu)) { 1750 if (enforce) 1751 scx_error(sch, "SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]", 1752 cpu, p->comm, p->pid); 1753 return false; 1754 } 1755 1756 if (!scx_rq_online(rq)) { 1757 if (enforce) 1758 __scx_add_event(sch, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE, 1); 1759 return false; 1760 } 1761 1762 return true; 1763 } 1764 1765 /** 1766 * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq 1767 * @p: target task 1768 * @dsq: locked DSQ @p is currently on 1769 * @src_rq: rq @p is currently on, stable with @dsq locked 1770 * 1771 * Called with @dsq locked but no rq's locked. We want to move @p to a different 1772 * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is 1773 * required when transferring into a local DSQ. Even when transferring into a 1774 * non-local DSQ, it's better to use the same mechanism to protect against 1775 * dequeues and maintain the invariant that @p->scx.dsq can only change while 1776 * @src_rq is locked, which e.g. scx_dump_task() depends on. 1777 * 1778 * We want to grab @src_rq but that can deadlock if we try while locking @dsq, 1779 * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As 1780 * this may race with dequeue, which can't drop the rq lock or fail, do a little 1781 * dancing from our side. 1782 * 1783 * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets 1784 * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu 1785 * would be cleared to -1. While other cpus may have updated it to different 1786 * values afterwards, as this operation can't be preempted or recurse, the 1787 * holding_cpu can never become this CPU again before we're done. Thus, we can 1788 * tell whether we lost to dequeue by testing whether the holding_cpu still 1789 * points to this CPU. See dispatch_dequeue() for the counterpart. 1790 * 1791 * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is 1792 * still valid. %false if lost to dequeue. 1793 */ 1794 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p, 1795 struct scx_dispatch_q *dsq, 1796 struct rq *src_rq) 1797 { 1798 s32 cpu = raw_smp_processor_id(); 1799 1800 lockdep_assert_held(&dsq->lock); 1801 1802 WARN_ON_ONCE(p->scx.holding_cpu >= 0); 1803 task_unlink_from_dsq(p, dsq); 1804 p->scx.holding_cpu = cpu; 1805 1806 raw_spin_unlock(&dsq->lock); 1807 raw_spin_rq_lock(src_rq); 1808 1809 /* task_rq couldn't have changed if we're still the holding cpu */ 1810 return likely(p->scx.holding_cpu == cpu) && 1811 !WARN_ON_ONCE(src_rq != task_rq(p)); 1812 } 1813 1814 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p, 1815 struct scx_dispatch_q *dsq, struct rq *src_rq) 1816 { 1817 raw_spin_rq_unlock(this_rq); 1818 1819 if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) { 1820 move_remote_task_to_local_dsq(p, 0, src_rq, this_rq); 1821 return true; 1822 } else { 1823 raw_spin_rq_unlock(src_rq); 1824 raw_spin_rq_lock(this_rq); 1825 return false; 1826 } 1827 } 1828 1829 /** 1830 * move_task_between_dsqs() - Move a task from one DSQ to another 1831 * @sch: scx_sched being operated on 1832 * @p: target task 1833 * @enq_flags: %SCX_ENQ_* 1834 * @src_dsq: DSQ @p is currently on, must not be a local DSQ 1835 * @dst_dsq: DSQ @p is being moved to, can be any DSQ 1836 * 1837 * Must be called with @p's task_rq and @src_dsq locked. 
If @dst_dsq is a local 1838 * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq 1839 * will change. As @p's task_rq is locked, this function doesn't need to use the 1840 * holding_cpu mechanism. 1841 * 1842 * On return, @src_dsq is unlocked and only @p's new task_rq, which is the 1843 * return value, is locked. 1844 */ 1845 static struct rq *move_task_between_dsqs(struct scx_sched *sch, 1846 struct task_struct *p, u64 enq_flags, 1847 struct scx_dispatch_q *src_dsq, 1848 struct scx_dispatch_q *dst_dsq) 1849 { 1850 struct rq *src_rq = task_rq(p), *dst_rq; 1851 1852 BUG_ON(src_dsq->id == SCX_DSQ_LOCAL); 1853 lockdep_assert_held(&src_dsq->lock); 1854 lockdep_assert_rq_held(src_rq); 1855 1856 if (dst_dsq->id == SCX_DSQ_LOCAL) { 1857 dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq); 1858 if (src_rq != dst_rq && 1859 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) { 1860 dst_dsq = find_global_dsq(sch, p); 1861 dst_rq = src_rq; 1862 } 1863 } else { 1864 /* no need to migrate if destination is a non-local DSQ */ 1865 dst_rq = src_rq; 1866 } 1867 1868 /* 1869 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different 1870 * CPU, @p will be migrated. 1871 */ 1872 if (dst_dsq->id == SCX_DSQ_LOCAL) { 1873 /* @p is going from a non-local DSQ to a local DSQ */ 1874 if (src_rq == dst_rq) { 1875 task_unlink_from_dsq(p, src_dsq); 1876 move_local_task_to_local_dsq(p, enq_flags, 1877 src_dsq, dst_rq); 1878 raw_spin_unlock(&src_dsq->lock); 1879 } else { 1880 raw_spin_unlock(&src_dsq->lock); 1881 move_remote_task_to_local_dsq(p, enq_flags, 1882 src_rq, dst_rq); 1883 } 1884 } else { 1885 /* 1886 * @p is going from a non-local DSQ to a non-local DSQ. As 1887 * $src_dsq is already locked, do an abbreviated dequeue. 1888 */ 1889 dispatch_dequeue_locked(p, src_dsq); 1890 raw_spin_unlock(&src_dsq->lock); 1891 1892 dispatch_enqueue(sch, dst_dsq, p, enq_flags); 1893 } 1894 1895 return dst_rq; 1896 } 1897 1898 static bool consume_dispatch_q(struct scx_sched *sch, struct rq *rq, 1899 struct scx_dispatch_q *dsq) 1900 { 1901 struct task_struct *p; 1902 retry: 1903 /* 1904 * The caller can't expect to successfully consume a task if the task's 1905 * addition to @dsq isn't guaranteed to be visible somehow. Test 1906 * @dsq->list without locking and skip if it seems empty. 1907 */ 1908 if (list_empty(&dsq->list)) 1909 return false; 1910 1911 raw_spin_lock(&dsq->lock); 1912 1913 nldsq_for_each_task(p, dsq) { 1914 struct rq *task_rq = task_rq(p); 1915 1916 /* 1917 * This loop can lead to multiple lockup scenarios, e.g. the BPF 1918 * scheduler can put an enormous number of affinitized tasks into 1919 * a contended DSQ, or the outer retry loop can repeatedly race 1920 * against scx_bypass() dequeueing tasks from @dsq trying to put 1921 * the system into the bypass mode. This can easily live-lock the 1922 * machine. If aborting, exit from all non-bypass DSQs. 
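 * The bypass DSQ itself is exempt as it must keep being drained for the abort
 * to make forward progress.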
1923 */ 1924 if (unlikely(READ_ONCE(scx_aborting)) && dsq->id != SCX_DSQ_BYPASS) 1925 break; 1926 1927 if (rq == task_rq) { 1928 task_unlink_from_dsq(p, dsq); 1929 move_local_task_to_local_dsq(p, 0, dsq, rq); 1930 raw_spin_unlock(&dsq->lock); 1931 return true; 1932 } 1933 1934 if (task_can_run_on_remote_rq(sch, p, rq, false)) { 1935 if (likely(consume_remote_task(rq, p, dsq, task_rq))) 1936 return true; 1937 goto retry; 1938 } 1939 } 1940 1941 raw_spin_unlock(&dsq->lock); 1942 return false; 1943 } 1944 1945 static bool consume_global_dsq(struct scx_sched *sch, struct rq *rq) 1946 { 1947 int node = cpu_to_node(cpu_of(rq)); 1948 1949 return consume_dispatch_q(sch, rq, sch->global_dsqs[node]); 1950 } 1951 1952 /** 1953 * dispatch_to_local_dsq - Dispatch a task to a local dsq 1954 * @sch: scx_sched being operated on 1955 * @rq: current rq which is locked 1956 * @dst_dsq: destination DSQ 1957 * @p: task to dispatch 1958 * @enq_flags: %SCX_ENQ_* 1959 * 1960 * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local 1961 * DSQ. This function performs all the synchronization dancing needed because 1962 * local DSQs are protected with rq locks. 1963 * 1964 * The caller must have exclusive ownership of @p (e.g. through 1965 * %SCX_OPSS_DISPATCHING). 1966 */ 1967 static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq, 1968 struct scx_dispatch_q *dst_dsq, 1969 struct task_struct *p, u64 enq_flags) 1970 { 1971 struct rq *src_rq = task_rq(p); 1972 struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq); 1973 struct rq *locked_rq = rq; 1974 1975 /* 1976 * We're synchronized against dequeue through DISPATCHING. As @p can't 1977 * be dequeued, its task_rq and cpus_allowed are stable too. 1978 * 1979 * If dispatching to @rq that @p is already on, no lock dancing needed. 1980 */ 1981 if (rq == src_rq && rq == dst_rq) { 1982 dispatch_enqueue(sch, dst_dsq, p, 1983 enq_flags | SCX_ENQ_CLEAR_OPSS); 1984 return; 1985 } 1986 1987 if (src_rq != dst_rq && 1988 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) { 1989 dispatch_enqueue(sch, find_global_dsq(sch, p), p, 1990 enq_flags | SCX_ENQ_CLEAR_OPSS); 1991 return; 1992 } 1993 1994 /* 1995 * @p is on a possibly remote @src_rq which we need to lock to move the 1996 * task. If dequeue is in progress, it'd be locking @src_rq and waiting 1997 * on DISPATCHING, so we can't grab @src_rq lock while holding 1998 * DISPATCHING. 1999 * 2000 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that 2001 * we're moving from a DSQ and use the same mechanism - mark the task 2002 * under transfer with holding_cpu, release DISPATCHING and then follow 2003 * the same protocol. See unlink_dsq_and_lock_src_rq(). 2004 */ 2005 p->scx.holding_cpu = raw_smp_processor_id(); 2006 2007 /* store_release ensures that dequeue sees the above */ 2008 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); 2009 2010 /* switch to @src_rq lock */ 2011 if (locked_rq != src_rq) { 2012 raw_spin_rq_unlock(locked_rq); 2013 locked_rq = src_rq; 2014 raw_spin_rq_lock(src_rq); 2015 } 2016 2017 /* task_rq couldn't have changed if we're still the holding cpu */ 2018 if (likely(p->scx.holding_cpu == raw_smp_processor_id()) && 2019 !WARN_ON_ONCE(src_rq != task_rq(p))) { 2020 /* 2021 * If @p is staying on the same rq, there's no need to go 2022 * through the full deactivate/activate cycle. Optimize by 2023 * abbreviating move_remote_task_to_local_dsq(). 
2024 */ 2025 if (src_rq == dst_rq) { 2026 p->scx.holding_cpu = -1; 2027 dispatch_enqueue(sch, &dst_rq->scx.local_dsq, p, 2028 enq_flags); 2029 } else { 2030 move_remote_task_to_local_dsq(p, enq_flags, 2031 src_rq, dst_rq); 2032 /* task has been moved to dst_rq, which is now locked */ 2033 locked_rq = dst_rq; 2034 } 2035 2036 /* if the destination CPU is idle, wake it up */ 2037 if (sched_class_above(p->sched_class, dst_rq->curr->sched_class)) 2038 resched_curr(dst_rq); 2039 } 2040 2041 /* switch back to @rq lock */ 2042 if (locked_rq != rq) { 2043 raw_spin_rq_unlock(locked_rq); 2044 raw_spin_rq_lock(rq); 2045 } 2046 } 2047 2048 /** 2049 * finish_dispatch - Asynchronously finish dispatching a task 2050 * @rq: current rq which is locked 2051 * @p: task to finish dispatching 2052 * @qseq_at_dispatch: qseq when @p started getting dispatched 2053 * @dsq_id: destination DSQ ID 2054 * @enq_flags: %SCX_ENQ_* 2055 * 2056 * Dispatching to local DSQs may need to wait for queueing to complete or 2057 * require rq lock dancing. As we don't wanna do either while inside 2058 * ops.dispatch() to avoid locking order inversion, we split dispatching into 2059 * two parts. scx_bpf_dsq_insert() which is called by ops.dispatch() records the 2060 * task and its qseq. Once ops.dispatch() returns, this function is called to 2061 * finish up. 2062 * 2063 * There is no guarantee that @p is still valid for dispatching or even that it 2064 * was valid in the first place. Make sure that the task is still owned by the 2065 * BPF scheduler and claim the ownership before dispatching. 2066 */ 2067 static void finish_dispatch(struct scx_sched *sch, struct rq *rq, 2068 struct task_struct *p, 2069 unsigned long qseq_at_dispatch, 2070 u64 dsq_id, u64 enq_flags) 2071 { 2072 struct scx_dispatch_q *dsq; 2073 unsigned long opss; 2074 2075 touch_core_sched_dispatch(rq, p); 2076 retry: 2077 /* 2078 * No need for _acquire here. @p is accessed only after a successful 2079 * try_cmpxchg to DISPATCHING. 2080 */ 2081 opss = atomic_long_read(&p->scx.ops_state); 2082 2083 switch (opss & SCX_OPSS_STATE_MASK) { 2084 case SCX_OPSS_DISPATCHING: 2085 case SCX_OPSS_NONE: 2086 /* someone else already got to it */ 2087 return; 2088 case SCX_OPSS_QUEUED: 2089 /* 2090 * If qseq doesn't match, @p has gone through at least one 2091 * dispatch/dequeue and re-enqueue cycle between 2092 * scx_bpf_dsq_insert() and here and we have no claim on it. 2093 */ 2094 if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch) 2095 return; 2096 2097 /* 2098 * While we know @p is accessible, we don't yet have a claim on 2099 * it - the BPF scheduler is allowed to dispatch tasks 2100 * spuriously and there can be a racing dequeue attempt. Let's 2101 * claim @p by atomically transitioning it from QUEUED to 2102 * DISPATCHING. 2103 */ 2104 if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss, 2105 SCX_OPSS_DISPATCHING))) 2106 break; 2107 goto retry; 2108 case SCX_OPSS_QUEUEING: 2109 /* 2110 * do_enqueue_task() is in the process of transferring the task 2111 * to the BPF scheduler while holding @p's rq lock. As we aren't 2112 * holding any kernel or BPF resource that the enqueue path may 2113 * depend upon, it's safe to wait. 
2114 */ 2115 wait_ops_state(p, opss); 2116 goto retry; 2117 } 2118 2119 BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED)); 2120 2121 dsq = find_dsq_for_dispatch(sch, this_rq(), dsq_id, p); 2122 2123 if (dsq->id == SCX_DSQ_LOCAL) 2124 dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags); 2125 else 2126 dispatch_enqueue(sch, dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS); 2127 } 2128 2129 static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq) 2130 { 2131 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 2132 u32 u; 2133 2134 for (u = 0; u < dspc->cursor; u++) { 2135 struct scx_dsp_buf_ent *ent = &dspc->buf[u]; 2136 2137 finish_dispatch(sch, rq, ent->task, ent->qseq, ent->dsq_id, 2138 ent->enq_flags); 2139 } 2140 2141 dspc->nr_tasks += dspc->cursor; 2142 dspc->cursor = 0; 2143 } 2144 2145 static inline void maybe_queue_balance_callback(struct rq *rq) 2146 { 2147 lockdep_assert_rq_held(rq); 2148 2149 if (!(rq->scx.flags & SCX_RQ_BAL_CB_PENDING)) 2150 return; 2151 2152 queue_balance_callback(rq, &rq->scx.deferred_bal_cb, 2153 deferred_bal_cb_workfn); 2154 2155 rq->scx.flags &= ~SCX_RQ_BAL_CB_PENDING; 2156 } 2157 2158 static int balance_one(struct rq *rq, struct task_struct *prev) 2159 { 2160 struct scx_sched *sch = scx_root; 2161 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 2162 bool prev_on_scx = prev->sched_class == &ext_sched_class; 2163 bool prev_on_rq = prev->scx.flags & SCX_TASK_QUEUED; 2164 int nr_loops = SCX_DSP_MAX_LOOPS; 2165 2166 lockdep_assert_rq_held(rq); 2167 rq->scx.flags |= SCX_RQ_IN_BALANCE; 2168 rq->scx.flags &= ~SCX_RQ_BAL_KEEP; 2169 2170 if ((sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT) && 2171 unlikely(rq->scx.cpu_released)) { 2172 /* 2173 * If the previous sched_class for the current CPU was not SCX, 2174 * notify the BPF scheduler that it again has control of the 2175 * core. This callback complements ->cpu_release(), which is 2176 * emitted in switch_class(). 2177 */ 2178 if (SCX_HAS_OP(sch, cpu_acquire)) 2179 SCX_CALL_OP(sch, SCX_KF_REST, cpu_acquire, rq, 2180 cpu_of(rq), NULL); 2181 rq->scx.cpu_released = false; 2182 } 2183 2184 if (prev_on_scx) { 2185 update_curr_scx(rq); 2186 2187 /* 2188 * If @prev is runnable & has slice left, it has priority and 2189 * fetching more just increases latency for the fetched tasks. 2190 * Tell pick_task_scx() to keep running @prev. If the BPF 2191 * scheduler wants to handle this explicitly, it should 2192 * implement ->cpu_release(). 2193 * 2194 * See scx_disable_workfn() for the explanation on the bypassing 2195 * test. 2196 */ 2197 if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) { 2198 rq->scx.flags |= SCX_RQ_BAL_KEEP; 2199 goto has_tasks; 2200 } 2201 } 2202 2203 /* if there already are tasks to run, nothing to do */ 2204 if (rq->scx.local_dsq.nr) 2205 goto has_tasks; 2206 2207 if (consume_global_dsq(sch, rq)) 2208 goto has_tasks; 2209 2210 if (scx_rq_bypassing(rq)) { 2211 if (consume_dispatch_q(sch, rq, &rq->scx.bypass_dsq)) 2212 goto has_tasks; 2213 else 2214 goto no_tasks; 2215 } 2216 2217 if (unlikely(!SCX_HAS_OP(sch, dispatch)) || !scx_rq_online(rq)) 2218 goto no_tasks; 2219 2220 dspc->rq = rq; 2221 2222 /* 2223 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock, 2224 * the local DSQ might still end up empty after a successful 2225 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch() 2226 * produced some tasks, retry. The BPF scheduler may depend on this 2227 * looping behavior to simplify its implementation. 
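 *
 * @dspc->nr_tasks counts what the last ops.dispatch() invocation produced; the
 * loop keeps going as long as the previous invocation produced something but
 * the local DSQ is still empty.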
2228 */ 2229 do { 2230 dspc->nr_tasks = 0; 2231 2232 SCX_CALL_OP(sch, SCX_KF_DISPATCH, dispatch, rq, 2233 cpu_of(rq), prev_on_scx ? prev : NULL); 2234 2235 flush_dispatch_buf(sch, rq); 2236 2237 if (prev_on_rq && prev->scx.slice) { 2238 rq->scx.flags |= SCX_RQ_BAL_KEEP; 2239 goto has_tasks; 2240 } 2241 if (rq->scx.local_dsq.nr) 2242 goto has_tasks; 2243 if (consume_global_dsq(sch, rq)) 2244 goto has_tasks; 2245 2246 /* 2247 * ops.dispatch() can trap us in this loop by repeatedly 2248 * dispatching ineligible tasks. Break out once in a while to 2249 * allow the watchdog to run. As IRQ can't be enabled in 2250 * balance(), we want to complete this scheduling cycle and then 2251 * start a new one. IOW, we want to call resched_curr() on the 2252 * next, most likely idle, task, not the current one. Use 2253 * scx_kick_cpu() for deferred kicking. 2254 */ 2255 if (unlikely(!--nr_loops)) { 2256 scx_kick_cpu(sch, cpu_of(rq), 0); 2257 break; 2258 } 2259 } while (dspc->nr_tasks); 2260 2261 no_tasks: 2262 /* 2263 * Didn't find another task to run. Keep running @prev unless 2264 * %SCX_OPS_ENQ_LAST is in effect. 2265 */ 2266 if (prev_on_rq && 2267 (!(sch->ops.flags & SCX_OPS_ENQ_LAST) || scx_rq_bypassing(rq))) { 2268 rq->scx.flags |= SCX_RQ_BAL_KEEP; 2269 __scx_add_event(sch, SCX_EV_DISPATCH_KEEP_LAST, 1); 2270 goto has_tasks; 2271 } 2272 rq->scx.flags &= ~SCX_RQ_IN_BALANCE; 2273 return false; 2274 2275 has_tasks: 2276 rq->scx.flags &= ~SCX_RQ_IN_BALANCE; 2277 return true; 2278 } 2279 2280 static void process_ddsp_deferred_locals(struct rq *rq) 2281 { 2282 struct task_struct *p; 2283 2284 lockdep_assert_rq_held(rq); 2285 2286 /* 2287 * Now that @rq can be unlocked, execute the deferred enqueueing of 2288 * tasks directly dispatched to the local DSQs of other CPUs. See 2289 * direct_dispatch(). Keep popping from the head instead of using 2290 * list_for_each_entry_safe() as dispatch_local_dsq() may unlock @rq 2291 * temporarily. 2292 */ 2293 while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals, 2294 struct task_struct, scx.dsq_list.node))) { 2295 struct scx_sched *sch = scx_root; 2296 struct scx_dispatch_q *dsq; 2297 2298 list_del_init(&p->scx.dsq_list.node); 2299 2300 dsq = find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p); 2301 if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL)) 2302 dispatch_to_local_dsq(sch, rq, dsq, p, 2303 p->scx.ddsp_enq_flags); 2304 } 2305 } 2306 2307 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first) 2308 { 2309 struct scx_sched *sch = scx_root; 2310 2311 if (p->scx.flags & SCX_TASK_QUEUED) { 2312 /* 2313 * Core-sched might decide to execute @p before it is 2314 * dispatched. Call ops_dequeue() to notify the BPF scheduler. 2315 */ 2316 ops_dequeue(rq, p, SCX_DEQ_CORE_SCHED_EXEC); 2317 dispatch_dequeue(rq, p); 2318 } 2319 2320 p->se.exec_start = rq_clock_task(rq); 2321 2322 /* see dequeue_task_scx() on why we skip when !QUEUED */ 2323 if (SCX_HAS_OP(sch, running) && (p->scx.flags & SCX_TASK_QUEUED)) 2324 SCX_CALL_OP_TASK(sch, SCX_KF_REST, running, rq, p); 2325 2326 clr_task_runnable(p, true); 2327 2328 /* 2329 * @p is getting newly scheduled or got kicked after someone updated its 2330 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick(). 
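 * A slice of %SCX_SLICE_INF indicates that the BPF scheduler wants @p to run
 * without tick preemption, which is reflected in %SCX_RQ_CAN_STOP_TICK.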
2331 */ 2332 if ((p->scx.slice == SCX_SLICE_INF) != 2333 (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) { 2334 if (p->scx.slice == SCX_SLICE_INF) 2335 rq->scx.flags |= SCX_RQ_CAN_STOP_TICK; 2336 else 2337 rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK; 2338 2339 sched_update_tick_dependency(rq); 2340 2341 /* 2342 * For now, let's refresh the load_avgs just when transitioning 2343 * in and out of nohz. In the future, we might want to add a 2344 * mechanism which calls the following periodically on 2345 * tick-stopped CPUs. 2346 */ 2347 update_other_load_avgs(rq); 2348 } 2349 } 2350 2351 static enum scx_cpu_preempt_reason 2352 preempt_reason_from_class(const struct sched_class *class) 2353 { 2354 if (class == &stop_sched_class) 2355 return SCX_CPU_PREEMPT_STOP; 2356 if (class == &dl_sched_class) 2357 return SCX_CPU_PREEMPT_DL; 2358 if (class == &rt_sched_class) 2359 return SCX_CPU_PREEMPT_RT; 2360 return SCX_CPU_PREEMPT_UNKNOWN; 2361 } 2362 2363 static void switch_class(struct rq *rq, struct task_struct *next) 2364 { 2365 struct scx_sched *sch = scx_root; 2366 const struct sched_class *next_class = next->sched_class; 2367 2368 if (!(sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT)) 2369 return; 2370 2371 /* 2372 * The callback is conceptually meant to convey that the CPU is no 2373 * longer under the control of SCX. Therefore, don't invoke the callback 2374 * if the next class is below SCX (in which case the BPF scheduler has 2375 * actively decided not to schedule any tasks on the CPU). 2376 */ 2377 if (sched_class_above(&ext_sched_class, next_class)) 2378 return; 2379 2380 /* 2381 * At this point we know that SCX was preempted by a higher priority 2382 * sched_class, so invoke the ->cpu_release() callback if we have not 2383 * done so already. We only send the callback once between SCX being 2384 * preempted, and it regaining control of the CPU. 2385 * 2386 * ->cpu_release() complements ->cpu_acquire(), which is emitted the 2387 * next time that balance_one() is invoked. 2388 */ 2389 if (!rq->scx.cpu_released) { 2390 if (SCX_HAS_OP(sch, cpu_release)) { 2391 struct scx_cpu_release_args args = { 2392 .reason = preempt_reason_from_class(next_class), 2393 .task = next, 2394 }; 2395 2396 SCX_CALL_OP(sch, SCX_KF_CPU_RELEASE, cpu_release, rq, 2397 cpu_of(rq), &args); 2398 } 2399 rq->scx.cpu_released = true; 2400 } 2401 } 2402 2403 static void put_prev_task_scx(struct rq *rq, struct task_struct *p, 2404 struct task_struct *next) 2405 { 2406 struct scx_sched *sch = scx_root; 2407 2408 /* see kick_cpus_irq_workfn() */ 2409 smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1); 2410 2411 update_curr_scx(rq); 2412 2413 /* see dequeue_task_scx() on why we skip when !QUEUED */ 2414 if (SCX_HAS_OP(sch, stopping) && (p->scx.flags & SCX_TASK_QUEUED)) 2415 SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, true); 2416 2417 if (p->scx.flags & SCX_TASK_QUEUED) { 2418 set_task_runnable(rq, p); 2419 2420 /* 2421 * If @p has slice left and is being put, @p is getting 2422 * preempted by a higher priority scheduler class or core-sched 2423 * forcing a different task. Leave it at the head of the local 2424 * DSQ. 2425 */ 2426 if (p->scx.slice && !scx_rq_bypassing(rq)) { 2427 dispatch_enqueue(sch, &rq->scx.local_dsq, p, 2428 SCX_ENQ_HEAD); 2429 goto switch_class; 2430 } 2431 2432 /* 2433 * If @p is runnable but we're about to enter a lower 2434 * sched_class, %SCX_OPS_ENQ_LAST must be set. 
Tell 2435 * ops.enqueue() that @p is the only one available for this cpu, 2436 * which should trigger an explicit follow-up scheduling event. 2437 */ 2438 if (next && sched_class_above(&ext_sched_class, next->sched_class)) { 2439 WARN_ON_ONCE(!(sch->ops.flags & SCX_OPS_ENQ_LAST)); 2440 do_enqueue_task(rq, p, SCX_ENQ_LAST, -1); 2441 } else { 2442 do_enqueue_task(rq, p, 0, -1); 2443 } 2444 } 2445 2446 switch_class: 2447 if (next && next->sched_class != &ext_sched_class) 2448 switch_class(rq, next); 2449 } 2450 2451 static struct task_struct *first_local_task(struct rq *rq) 2452 { 2453 return list_first_entry_or_null(&rq->scx.local_dsq.list, 2454 struct task_struct, scx.dsq_list.node); 2455 } 2456 2457 static struct task_struct * 2458 do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx) 2459 { 2460 struct task_struct *prev = rq->curr; 2461 bool keep_prev; 2462 struct task_struct *p; 2463 2464 /* see kick_cpus_irq_workfn() */ 2465 smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1); 2466 2467 rq->next_class = &ext_sched_class; 2468 2469 rq_unpin_lock(rq, rf); 2470 balance_one(rq, prev); 2471 rq_repin_lock(rq, rf); 2472 maybe_queue_balance_callback(rq); 2473 2474 /* 2475 * If any higher-priority sched class enqueued a runnable task on 2476 * this rq during balance_one(), abort and return RETRY_TASK, so 2477 * that the scheduler loop can restart. 2478 * 2479 * If @force_scx is true, always try to pick a SCHED_EXT task, 2480 * regardless of any higher-priority sched classes activity. 2481 */ 2482 if (!force_scx && sched_class_above(rq->next_class, &ext_sched_class)) 2483 return RETRY_TASK; 2484 2485 keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP; 2486 if (unlikely(keep_prev && 2487 prev->sched_class != &ext_sched_class)) { 2488 WARN_ON_ONCE(scx_enable_state() == SCX_ENABLED); 2489 keep_prev = false; 2490 } 2491 2492 /* 2493 * If balance_one() is telling us to keep running @prev, replenish slice 2494 * if necessary and keep running @prev. Otherwise, pop the first one 2495 * from the local DSQ. 2496 */ 2497 if (keep_prev) { 2498 p = prev; 2499 if (!p->scx.slice) 2500 refill_task_slice_dfl(rcu_dereference_sched(scx_root), p); 2501 } else { 2502 p = first_local_task(rq); 2503 if (!p) 2504 return NULL; 2505 2506 if (unlikely(!p->scx.slice)) { 2507 struct scx_sched *sch = rcu_dereference_sched(scx_root); 2508 2509 if (!scx_rq_bypassing(rq) && !sch->warned_zero_slice) { 2510 printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n", 2511 p->comm, p->pid, __func__); 2512 sch->warned_zero_slice = true; 2513 } 2514 refill_task_slice_dfl(sch, p); 2515 } 2516 } 2517 2518 return p; 2519 } 2520 2521 static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf) 2522 { 2523 return do_pick_task_scx(rq, rf, false); 2524 } 2525 2526 /* 2527 * Select the next task to run from the ext scheduling class. 2528 * 2529 * Use do_pick_task_scx() directly with @force_scx enabled, since the 2530 * dl_server must always select a sched_ext task. 2531 */ 2532 static struct task_struct * 2533 ext_server_pick_task(struct sched_dl_entity *dl_se, struct rq_flags *rf) 2534 { 2535 if (!scx_enabled()) 2536 return NULL; 2537 2538 return do_pick_task_scx(dl_se->rq, rf, true); 2539 } 2540 2541 /* 2542 * Initialize the ext server deadline entity. 
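 *
 * The DL server entity reserves bandwidth for the sched_ext class so that
 * runnable SCX tasks keep making progress even when higher-priority classes
 * would otherwise occupy the CPU indefinitely.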
2543 */ 2544 void ext_server_init(struct rq *rq) 2545 { 2546 struct sched_dl_entity *dl_se = &rq->ext_server; 2547 2548 init_dl_entity(dl_se); 2549 2550 dl_server_init(dl_se, rq, ext_server_pick_task); 2551 } 2552 2553 #ifdef CONFIG_SCHED_CORE 2554 /** 2555 * scx_prio_less - Task ordering for core-sched 2556 * @a: task A 2557 * @b: task B 2558 * @in_fi: in forced idle state 2559 * 2560 * Core-sched is implemented as an additional scheduling layer on top of the 2561 * usual sched_class'es and needs to find out the expected task ordering. For 2562 * SCX, core-sched calls this function to interrogate the task ordering. 2563 * 2564 * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used 2565 * to implement the default task ordering. The older the timestamp, the higher 2566 * priority the task - the global FIFO ordering matching the default scheduling 2567 * behavior. 2568 * 2569 * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to 2570 * implement FIFO ordering within each local DSQ. See pick_task_scx(). 2571 */ 2572 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b, 2573 bool in_fi) 2574 { 2575 struct scx_sched *sch = scx_root; 2576 2577 /* 2578 * The const qualifiers are dropped from task_struct pointers when 2579 * calling ops.core_sched_before(). Accesses are controlled by the 2580 * verifier. 2581 */ 2582 if (SCX_HAS_OP(sch, core_sched_before) && 2583 !scx_rq_bypassing(task_rq(a))) 2584 return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, core_sched_before, 2585 NULL, 2586 (struct task_struct *)a, 2587 (struct task_struct *)b); 2588 else 2589 return time_after64(a->scx.core_sched_at, b->scx.core_sched_at); 2590 } 2591 #endif /* CONFIG_SCHED_CORE */ 2592 2593 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags) 2594 { 2595 struct scx_sched *sch = scx_root; 2596 bool rq_bypass; 2597 2598 /* 2599 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it 2600 * can be a good migration opportunity with low cache and memory 2601 * footprint. Returning a CPU different than @prev_cpu triggers 2602 * immediate rq migration. However, for SCX, as the current rq 2603 * association doesn't dictate where the task is going to run, this 2604 * doesn't fit well. If necessary, we can later add a dedicated method 2605 * which can decide to preempt self to force it through the regular 2606 * scheduling path. 
2607 */ 2608 if (unlikely(wake_flags & WF_EXEC)) 2609 return prev_cpu; 2610 2611 rq_bypass = scx_rq_bypassing(task_rq(p)); 2612 if (likely(SCX_HAS_OP(sch, select_cpu)) && !rq_bypass) { 2613 s32 cpu; 2614 struct task_struct **ddsp_taskp; 2615 2616 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task); 2617 WARN_ON_ONCE(*ddsp_taskp); 2618 *ddsp_taskp = p; 2619 2620 cpu = SCX_CALL_OP_TASK_RET(sch, 2621 SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU, 2622 select_cpu, NULL, p, prev_cpu, 2623 wake_flags); 2624 p->scx.selected_cpu = cpu; 2625 *ddsp_taskp = NULL; 2626 if (ops_cpu_valid(sch, cpu, "from ops.select_cpu()")) 2627 return cpu; 2628 else 2629 return prev_cpu; 2630 } else { 2631 s32 cpu; 2632 2633 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0); 2634 if (cpu >= 0) { 2635 refill_task_slice_dfl(sch, p); 2636 p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL; 2637 } else { 2638 cpu = prev_cpu; 2639 } 2640 p->scx.selected_cpu = cpu; 2641 2642 if (rq_bypass) 2643 __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1); 2644 return cpu; 2645 } 2646 } 2647 2648 static void task_woken_scx(struct rq *rq, struct task_struct *p) 2649 { 2650 run_deferred(rq); 2651 } 2652 2653 static void set_cpus_allowed_scx(struct task_struct *p, 2654 struct affinity_context *ac) 2655 { 2656 struct scx_sched *sch = scx_root; 2657 2658 set_cpus_allowed_common(p, ac); 2659 2660 if (task_dead_and_done(p)) 2661 return; 2662 2663 /* 2664 * The effective cpumask is stored in @p->cpus_ptr which may temporarily 2665 * differ from the configured one in @p->cpus_mask. Always tell the bpf 2666 * scheduler the effective one. 2667 * 2668 * Fine-grained memory write control is enforced by BPF making the const 2669 * designation pointless. Cast it away when calling the operation. 2670 */ 2671 if (SCX_HAS_OP(sch, set_cpumask)) 2672 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, NULL, 2673 p, (struct cpumask *)p->cpus_ptr); 2674 } 2675 2676 static void handle_hotplug(struct rq *rq, bool online) 2677 { 2678 struct scx_sched *sch = scx_root; 2679 int cpu = cpu_of(rq); 2680 2681 atomic_long_inc(&scx_hotplug_seq); 2682 2683 /* 2684 * scx_root updates are protected by cpus_read_lock() and will stay 2685 * stable here. Note that we can't depend on scx_enabled() test as the 2686 * hotplug ops need to be enabled before __scx_enabled is set. 2687 */ 2688 if (unlikely(!sch)) 2689 return; 2690 2691 if (scx_enabled()) 2692 scx_idle_update_selcpu_topology(&sch->ops); 2693 2694 if (online && SCX_HAS_OP(sch, cpu_online)) 2695 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_online, NULL, cpu); 2696 else if (!online && SCX_HAS_OP(sch, cpu_offline)) 2697 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_offline, NULL, cpu); 2698 else 2699 scx_exit(sch, SCX_EXIT_UNREG_KERN, 2700 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG, 2701 "cpu %d going %s, exiting scheduler", cpu, 2702 online ? 
"online" : "offline"); 2703 } 2704 2705 void scx_rq_activate(struct rq *rq) 2706 { 2707 handle_hotplug(rq, true); 2708 } 2709 2710 void scx_rq_deactivate(struct rq *rq) 2711 { 2712 handle_hotplug(rq, false); 2713 } 2714 2715 static void rq_online_scx(struct rq *rq) 2716 { 2717 rq->scx.flags |= SCX_RQ_ONLINE; 2718 } 2719 2720 static void rq_offline_scx(struct rq *rq) 2721 { 2722 rq->scx.flags &= ~SCX_RQ_ONLINE; 2723 } 2724 2725 2726 static bool check_rq_for_timeouts(struct rq *rq) 2727 { 2728 struct scx_sched *sch; 2729 struct task_struct *p; 2730 struct rq_flags rf; 2731 bool timed_out = false; 2732 2733 rq_lock_irqsave(rq, &rf); 2734 sch = rcu_dereference_bh(scx_root); 2735 if (unlikely(!sch)) 2736 goto out_unlock; 2737 2738 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) { 2739 unsigned long last_runnable = p->scx.runnable_at; 2740 2741 if (unlikely(time_after(jiffies, 2742 last_runnable + READ_ONCE(scx_watchdog_timeout)))) { 2743 u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable); 2744 2745 scx_exit(sch, SCX_EXIT_ERROR_STALL, 0, 2746 "%s[%d] failed to run for %u.%03us", 2747 p->comm, p->pid, dur_ms / 1000, dur_ms % 1000); 2748 timed_out = true; 2749 break; 2750 } 2751 } 2752 out_unlock: 2753 rq_unlock_irqrestore(rq, &rf); 2754 return timed_out; 2755 } 2756 2757 static void scx_watchdog_workfn(struct work_struct *work) 2758 { 2759 int cpu; 2760 2761 WRITE_ONCE(scx_watchdog_timestamp, jiffies); 2762 2763 for_each_online_cpu(cpu) { 2764 if (unlikely(check_rq_for_timeouts(cpu_rq(cpu)))) 2765 break; 2766 2767 cond_resched(); 2768 } 2769 queue_delayed_work(system_unbound_wq, to_delayed_work(work), 2770 READ_ONCE(scx_watchdog_timeout) / 2); 2771 } 2772 2773 void scx_tick(struct rq *rq) 2774 { 2775 struct scx_sched *sch; 2776 unsigned long last_check; 2777 2778 if (!scx_enabled()) 2779 return; 2780 2781 sch = rcu_dereference_bh(scx_root); 2782 if (unlikely(!sch)) 2783 return; 2784 2785 last_check = READ_ONCE(scx_watchdog_timestamp); 2786 if (unlikely(time_after(jiffies, 2787 last_check + READ_ONCE(scx_watchdog_timeout)))) { 2788 u32 dur_ms = jiffies_to_msecs(jiffies - last_check); 2789 2790 scx_exit(sch, SCX_EXIT_ERROR_STALL, 0, 2791 "watchdog failed to check in for %u.%03us", 2792 dur_ms / 1000, dur_ms % 1000); 2793 } 2794 2795 update_other_load_avgs(rq); 2796 } 2797 2798 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued) 2799 { 2800 struct scx_sched *sch = scx_root; 2801 2802 update_curr_scx(rq); 2803 2804 /* 2805 * While disabling, always resched and refresh core-sched timestamp as 2806 * we can't trust the slice management or ops.core_sched_before(). 2807 */ 2808 if (scx_rq_bypassing(rq)) { 2809 curr->scx.slice = 0; 2810 touch_core_sched(rq, curr); 2811 } else if (SCX_HAS_OP(sch, tick)) { 2812 SCX_CALL_OP_TASK(sch, SCX_KF_REST, tick, rq, curr); 2813 } 2814 2815 if (!curr->scx.slice) 2816 resched_curr(rq); 2817 } 2818 2819 #ifdef CONFIG_EXT_GROUP_SCHED 2820 static struct cgroup *tg_cgrp(struct task_group *tg) 2821 { 2822 /* 2823 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup, 2824 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the 2825 * root cgroup. 
2826 */ 2827 if (tg && tg->css.cgroup) 2828 return tg->css.cgroup; 2829 else 2830 return &cgrp_dfl_root.cgrp; 2831 } 2832 2833 #define SCX_INIT_TASK_ARGS_CGROUP(tg) .cgroup = tg_cgrp(tg), 2834 2835 #else /* CONFIG_EXT_GROUP_SCHED */ 2836 2837 #define SCX_INIT_TASK_ARGS_CGROUP(tg) 2838 2839 #endif /* CONFIG_EXT_GROUP_SCHED */ 2840 2841 static enum scx_task_state scx_get_task_state(const struct task_struct *p) 2842 { 2843 return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT; 2844 } 2845 2846 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state) 2847 { 2848 enum scx_task_state prev_state = scx_get_task_state(p); 2849 bool warn = false; 2850 2851 BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS)); 2852 2853 switch (state) { 2854 case SCX_TASK_NONE: 2855 break; 2856 case SCX_TASK_INIT: 2857 warn = prev_state != SCX_TASK_NONE; 2858 break; 2859 case SCX_TASK_READY: 2860 warn = prev_state == SCX_TASK_NONE; 2861 break; 2862 case SCX_TASK_ENABLED: 2863 warn = prev_state != SCX_TASK_READY; 2864 break; 2865 default: 2866 warn = true; 2867 return; 2868 } 2869 2870 WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]", 2871 prev_state, state, p->comm, p->pid); 2872 2873 p->scx.flags &= ~SCX_TASK_STATE_MASK; 2874 p->scx.flags |= state << SCX_TASK_STATE_SHIFT; 2875 } 2876 2877 static int scx_init_task(struct task_struct *p, struct task_group *tg, bool fork) 2878 { 2879 struct scx_sched *sch = scx_root; 2880 int ret; 2881 2882 p->scx.disallow = false; 2883 2884 if (SCX_HAS_OP(sch, init_task)) { 2885 struct scx_init_task_args args = { 2886 SCX_INIT_TASK_ARGS_CGROUP(tg) 2887 .fork = fork, 2888 }; 2889 2890 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init_task, NULL, 2891 p, &args); 2892 if (unlikely(ret)) { 2893 ret = ops_sanitize_err(sch, "init_task", ret); 2894 return ret; 2895 } 2896 } 2897 2898 scx_set_task_state(p, SCX_TASK_INIT); 2899 2900 if (p->scx.disallow) { 2901 if (!fork) { 2902 struct rq *rq; 2903 struct rq_flags rf; 2904 2905 rq = task_rq_lock(p, &rf); 2906 2907 /* 2908 * We're in the load path and @p->policy will be applied 2909 * right after. Reverting @p->policy here and rejecting 2910 * %SCHED_EXT transitions from scx_check_setscheduler() 2911 * guarantees that if ops.init_task() sets @p->disallow, 2912 * @p can never be in SCX. 2913 */ 2914 if (p->policy == SCHED_EXT) { 2915 p->policy = SCHED_NORMAL; 2916 atomic_long_inc(&scx_nr_rejected); 2917 } 2918 2919 task_rq_unlock(rq, p, &rf); 2920 } else if (p->policy == SCHED_EXT) { 2921 scx_error(sch, "ops.init_task() set task->scx.disallow for %s[%d] during fork", 2922 p->comm, p->pid); 2923 } 2924 } 2925 2926 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT; 2927 return 0; 2928 } 2929 2930 static void scx_enable_task(struct task_struct *p) 2931 { 2932 struct scx_sched *sch = scx_root; 2933 struct rq *rq = task_rq(p); 2934 u32 weight; 2935 2936 lockdep_assert_rq_held(rq); 2937 2938 /* 2939 * Set the weight before calling ops.enable() so that the scheduler 2940 * doesn't see a stale value if they inspect the task struct. 
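 *
 * The weight is converted with sched_weight_to_cgroup() so that
 * ops.set_weight() always sees values on the cgroup weight scale.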
2941 */ 2942 if (task_has_idle_policy(p)) 2943 weight = WEIGHT_IDLEPRIO; 2944 else 2945 weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO]; 2946 2947 p->scx.weight = sched_weight_to_cgroup(weight); 2948 2949 if (SCX_HAS_OP(sch, enable)) 2950 SCX_CALL_OP_TASK(sch, SCX_KF_REST, enable, rq, p); 2951 scx_set_task_state(p, SCX_TASK_ENABLED); 2952 2953 if (SCX_HAS_OP(sch, set_weight)) 2954 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq, 2955 p, p->scx.weight); 2956 } 2957 2958 static void scx_disable_task(struct task_struct *p) 2959 { 2960 struct scx_sched *sch = scx_root; 2961 struct rq *rq = task_rq(p); 2962 2963 lockdep_assert_rq_held(rq); 2964 WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED); 2965 2966 if (SCX_HAS_OP(sch, disable)) 2967 SCX_CALL_OP_TASK(sch, SCX_KF_REST, disable, rq, p); 2968 scx_set_task_state(p, SCX_TASK_READY); 2969 } 2970 2971 static void scx_exit_task(struct task_struct *p) 2972 { 2973 struct scx_sched *sch = scx_root; 2974 struct scx_exit_task_args args = { 2975 .cancelled = false, 2976 }; 2977 2978 lockdep_assert_rq_held(task_rq(p)); 2979 2980 switch (scx_get_task_state(p)) { 2981 case SCX_TASK_NONE: 2982 return; 2983 case SCX_TASK_INIT: 2984 args.cancelled = true; 2985 break; 2986 case SCX_TASK_READY: 2987 break; 2988 case SCX_TASK_ENABLED: 2989 scx_disable_task(p); 2990 break; 2991 default: 2992 WARN_ON_ONCE(true); 2993 return; 2994 } 2995 2996 if (SCX_HAS_OP(sch, exit_task)) 2997 SCX_CALL_OP_TASK(sch, SCX_KF_REST, exit_task, task_rq(p), 2998 p, &args); 2999 scx_set_task_state(p, SCX_TASK_NONE); 3000 } 3001 3002 void init_scx_entity(struct sched_ext_entity *scx) 3003 { 3004 memset(scx, 0, sizeof(*scx)); 3005 INIT_LIST_HEAD(&scx->dsq_list.node); 3006 RB_CLEAR_NODE(&scx->dsq_priq); 3007 scx->sticky_cpu = -1; 3008 scx->holding_cpu = -1; 3009 INIT_LIST_HEAD(&scx->runnable_node); 3010 scx->runnable_at = jiffies; 3011 scx->ddsp_dsq_id = SCX_DSQ_INVALID; 3012 scx->slice = READ_ONCE(scx_slice_dfl); 3013 } 3014 3015 void scx_pre_fork(struct task_struct *p) 3016 { 3017 /* 3018 * BPF scheduler enable/disable paths want to be able to iterate and 3019 * update all tasks which can become complex when racing forks. As 3020 * enable/disable are very cold paths, let's use a percpu_rwsem to 3021 * exclude forks. 3022 */ 3023 percpu_down_read(&scx_fork_rwsem); 3024 } 3025 3026 int scx_fork(struct task_struct *p) 3027 { 3028 percpu_rwsem_assert_held(&scx_fork_rwsem); 3029 3030 if (scx_init_task_enabled) 3031 return scx_init_task(p, task_group(p), true); 3032 else 3033 return 0; 3034 } 3035 3036 void scx_post_fork(struct task_struct *p) 3037 { 3038 if (scx_init_task_enabled) { 3039 scx_set_task_state(p, SCX_TASK_READY); 3040 3041 /* 3042 * Enable the task immediately if it's running on sched_ext. 3043 * Otherwise, it'll be enabled in switching_to_scx() if and 3044 * when it's ever configured to run with a SCHED_EXT policy. 
3045 */ 3046 if (p->sched_class == &ext_sched_class) { 3047 struct rq_flags rf; 3048 struct rq *rq; 3049 3050 rq = task_rq_lock(p, &rf); 3051 scx_enable_task(p); 3052 task_rq_unlock(rq, p, &rf); 3053 } 3054 } 3055 3056 raw_spin_lock_irq(&scx_tasks_lock); 3057 list_add_tail(&p->scx.tasks_node, &scx_tasks); 3058 raw_spin_unlock_irq(&scx_tasks_lock); 3059 3060 percpu_up_read(&scx_fork_rwsem); 3061 } 3062 3063 void scx_cancel_fork(struct task_struct *p) 3064 { 3065 if (scx_enabled()) { 3066 struct rq *rq; 3067 struct rq_flags rf; 3068 3069 rq = task_rq_lock(p, &rf); 3070 WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY); 3071 scx_exit_task(p); 3072 task_rq_unlock(rq, p, &rf); 3073 } 3074 3075 percpu_up_read(&scx_fork_rwsem); 3076 } 3077 3078 /** 3079 * task_dead_and_done - Is a task dead and done running? 3080 * @p: target task 3081 * 3082 * Once sched_ext_dead() removes the dead task from scx_tasks and exits it, the 3083 * task no longer exists from SCX's POV. However, certain sched_class ops may be 3084 * invoked on these dead tasks leading to failures - e.g. sched_setscheduler() 3085 * may try to switch a task which finished sched_ext_dead() back into SCX 3086 * triggering invalid SCX task state transitions and worse. 3087 * 3088 * Once a task has finished the final switch, sched_ext_dead() is the only thing 3089 * that needs to happen on the task. Use this test to short-circuit sched_class 3090 * operations which may be called on dead tasks. 3091 */ 3092 static bool task_dead_and_done(struct task_struct *p) 3093 { 3094 struct rq *rq = task_rq(p); 3095 3096 lockdep_assert_rq_held(rq); 3097 3098 /* 3099 * In do_task_dead(), a dying task sets %TASK_DEAD with preemption 3100 * disabled and __schedule(). If @p has %TASK_DEAD set and off CPU, @p 3101 * won't ever run again. 3102 */ 3103 return unlikely(READ_ONCE(p->__state) == TASK_DEAD) && 3104 !task_on_cpu(rq, p); 3105 } 3106 3107 void sched_ext_dead(struct task_struct *p) 3108 { 3109 unsigned long flags; 3110 3111 /* 3112 * By the time control reaches here, @p has %TASK_DEAD set, switched out 3113 * for the last time and then dropped the rq lock - task_dead_and_done() 3114 * should be returning %true nullifying the straggling sched_class ops. 3115 * Remove from scx_tasks and exit @p. 3116 */ 3117 raw_spin_lock_irqsave(&scx_tasks_lock, flags); 3118 list_del_init(&p->scx.tasks_node); 3119 raw_spin_unlock_irqrestore(&scx_tasks_lock, flags); 3120 3121 /* 3122 * @p is off scx_tasks and wholly ours. scx_enable()'s READY -> ENABLED 3123 * transitions can't race us. Disable ops for @p. 
3124 */ 3125 if (scx_get_task_state(p) != SCX_TASK_NONE) { 3126 struct rq_flags rf; 3127 struct rq *rq; 3128 3129 rq = task_rq_lock(p, &rf); 3130 scx_exit_task(p); 3131 task_rq_unlock(rq, p, &rf); 3132 } 3133 } 3134 3135 static void reweight_task_scx(struct rq *rq, struct task_struct *p, 3136 const struct load_weight *lw) 3137 { 3138 struct scx_sched *sch = scx_root; 3139 3140 lockdep_assert_rq_held(task_rq(p)); 3141 3142 if (task_dead_and_done(p)) 3143 return; 3144 3145 p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight)); 3146 if (SCX_HAS_OP(sch, set_weight)) 3147 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq, 3148 p, p->scx.weight); 3149 } 3150 3151 static void prio_changed_scx(struct rq *rq, struct task_struct *p, u64 oldprio) 3152 { 3153 } 3154 3155 static void switching_to_scx(struct rq *rq, struct task_struct *p) 3156 { 3157 struct scx_sched *sch = scx_root; 3158 3159 if (task_dead_and_done(p)) 3160 return; 3161 3162 scx_enable_task(p); 3163 3164 /* 3165 * set_cpus_allowed_scx() is not called while @p is associated with a 3166 * different scheduler class. Keep the BPF scheduler up-to-date. 3167 */ 3168 if (SCX_HAS_OP(sch, set_cpumask)) 3169 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, rq, 3170 p, (struct cpumask *)p->cpus_ptr); 3171 } 3172 3173 static void switched_from_scx(struct rq *rq, struct task_struct *p) 3174 { 3175 if (task_dead_and_done(p)) 3176 return; 3177 3178 scx_disable_task(p); 3179 } 3180 3181 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags) {} 3182 3183 static void switched_to_scx(struct rq *rq, struct task_struct *p) {} 3184 3185 int scx_check_setscheduler(struct task_struct *p, int policy) 3186 { 3187 lockdep_assert_rq_held(task_rq(p)); 3188 3189 /* if disallow, reject transitioning into SCX */ 3190 if (scx_enabled() && READ_ONCE(p->scx.disallow) && 3191 p->policy != policy && policy == SCHED_EXT) 3192 return -EACCES; 3193 3194 return 0; 3195 } 3196 3197 #ifdef CONFIG_NO_HZ_FULL 3198 bool scx_can_stop_tick(struct rq *rq) 3199 { 3200 struct task_struct *p = rq->curr; 3201 3202 if (scx_rq_bypassing(rq)) 3203 return false; 3204 3205 if (p->sched_class != &ext_sched_class) 3206 return true; 3207 3208 /* 3209 * @rq can dispatch from different DSQs, so we can't tell whether it 3210 * needs the tick or not by looking at nr_running. Allow stopping ticks 3211 * iff the BPF scheduler indicated so. See set_next_task_scx(). 
3212 */ 3213 return rq->scx.flags & SCX_RQ_CAN_STOP_TICK; 3214 } 3215 #endif 3216 3217 #ifdef CONFIG_EXT_GROUP_SCHED 3218 3219 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_ops_rwsem); 3220 static bool scx_cgroup_enabled; 3221 3222 void scx_tg_init(struct task_group *tg) 3223 { 3224 tg->scx.weight = CGROUP_WEIGHT_DFL; 3225 tg->scx.bw_period_us = default_bw_period_us(); 3226 tg->scx.bw_quota_us = RUNTIME_INF; 3227 tg->scx.idle = false; 3228 } 3229 3230 int scx_tg_online(struct task_group *tg) 3231 { 3232 struct scx_sched *sch = scx_root; 3233 int ret = 0; 3234 3235 WARN_ON_ONCE(tg->scx.flags & (SCX_TG_ONLINE | SCX_TG_INITED)); 3236 3237 if (scx_cgroup_enabled) { 3238 if (SCX_HAS_OP(sch, cgroup_init)) { 3239 struct scx_cgroup_init_args args = 3240 { .weight = tg->scx.weight, 3241 .bw_period_us = tg->scx.bw_period_us, 3242 .bw_quota_us = tg->scx.bw_quota_us, 3243 .bw_burst_us = tg->scx.bw_burst_us }; 3244 3245 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init, 3246 NULL, tg->css.cgroup, &args); 3247 if (ret) 3248 ret = ops_sanitize_err(sch, "cgroup_init", ret); 3249 } 3250 if (ret == 0) 3251 tg->scx.flags |= SCX_TG_ONLINE | SCX_TG_INITED; 3252 } else { 3253 tg->scx.flags |= SCX_TG_ONLINE; 3254 } 3255 3256 return ret; 3257 } 3258 3259 void scx_tg_offline(struct task_group *tg) 3260 { 3261 struct scx_sched *sch = scx_root; 3262 3263 WARN_ON_ONCE(!(tg->scx.flags & SCX_TG_ONLINE)); 3264 3265 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_exit) && 3266 (tg->scx.flags & SCX_TG_INITED)) 3267 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL, 3268 tg->css.cgroup); 3269 tg->scx.flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED); 3270 } 3271 3272 int scx_cgroup_can_attach(struct cgroup_taskset *tset) 3273 { 3274 struct scx_sched *sch = scx_root; 3275 struct cgroup_subsys_state *css; 3276 struct task_struct *p; 3277 int ret; 3278 3279 if (!scx_cgroup_enabled) 3280 return 0; 3281 3282 cgroup_taskset_for_each(p, css, tset) { 3283 struct cgroup *from = tg_cgrp(task_group(p)); 3284 struct cgroup *to = tg_cgrp(css_tg(css)); 3285 3286 WARN_ON_ONCE(p->scx.cgrp_moving_from); 3287 3288 /* 3289 * sched_move_task() omits identity migrations. Let's match the 3290 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move() 3291 * always match one-to-one. 3292 */ 3293 if (from == to) 3294 continue; 3295 3296 if (SCX_HAS_OP(sch, cgroup_prep_move)) { 3297 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, 3298 cgroup_prep_move, NULL, 3299 p, from, css->cgroup); 3300 if (ret) 3301 goto err; 3302 } 3303 3304 p->scx.cgrp_moving_from = from; 3305 } 3306 3307 return 0; 3308 3309 err: 3310 cgroup_taskset_for_each(p, css, tset) { 3311 if (SCX_HAS_OP(sch, cgroup_cancel_move) && 3312 p->scx.cgrp_moving_from) 3313 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL, 3314 p, p->scx.cgrp_moving_from, css->cgroup); 3315 p->scx.cgrp_moving_from = NULL; 3316 } 3317 3318 return ops_sanitize_err(sch, "cgroup_prep_move", ret); 3319 } 3320 3321 void scx_cgroup_move_task(struct task_struct *p) 3322 { 3323 struct scx_sched *sch = scx_root; 3324 3325 if (!scx_cgroup_enabled) 3326 return; 3327 3328 /* 3329 * @p must have ops.cgroup_prep_move() called on it and thus 3330 * cgrp_moving_from set. 
3331 */ 3332 if (SCX_HAS_OP(sch, cgroup_move) && 3333 !WARN_ON_ONCE(!p->scx.cgrp_moving_from)) 3334 SCX_CALL_OP_TASK(sch, SCX_KF_UNLOCKED, cgroup_move, NULL, 3335 p, p->scx.cgrp_moving_from, 3336 tg_cgrp(task_group(p))); 3337 p->scx.cgrp_moving_from = NULL; 3338 } 3339 3340 void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) 3341 { 3342 struct scx_sched *sch = scx_root; 3343 struct cgroup_subsys_state *css; 3344 struct task_struct *p; 3345 3346 if (!scx_cgroup_enabled) 3347 return; 3348 3349 cgroup_taskset_for_each(p, css, tset) { 3350 if (SCX_HAS_OP(sch, cgroup_cancel_move) && 3351 p->scx.cgrp_moving_from) 3352 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL, 3353 p, p->scx.cgrp_moving_from, css->cgroup); 3354 p->scx.cgrp_moving_from = NULL; 3355 } 3356 } 3357 3358 void scx_group_set_weight(struct task_group *tg, unsigned long weight) 3359 { 3360 struct scx_sched *sch = scx_root; 3361 3362 percpu_down_read(&scx_cgroup_ops_rwsem); 3363 3364 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_weight) && 3365 tg->scx.weight != weight) 3366 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_weight, NULL, 3367 tg_cgrp(tg), weight); 3368 3369 tg->scx.weight = weight; 3370 3371 percpu_up_read(&scx_cgroup_ops_rwsem); 3372 } 3373 3374 void scx_group_set_idle(struct task_group *tg, bool idle) 3375 { 3376 struct scx_sched *sch = scx_root; 3377 3378 percpu_down_read(&scx_cgroup_ops_rwsem); 3379 3380 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_idle)) 3381 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_idle, NULL, 3382 tg_cgrp(tg), idle); 3383 3384 /* Update the task group's idle state */ 3385 tg->scx.idle = idle; 3386 3387 percpu_up_read(&scx_cgroup_ops_rwsem); 3388 } 3389 3390 void scx_group_set_bandwidth(struct task_group *tg, 3391 u64 period_us, u64 quota_us, u64 burst_us) 3392 { 3393 struct scx_sched *sch = scx_root; 3394 3395 percpu_down_read(&scx_cgroup_ops_rwsem); 3396 3397 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_bandwidth) && 3398 (tg->scx.bw_period_us != period_us || 3399 tg->scx.bw_quota_us != quota_us || 3400 tg->scx.bw_burst_us != burst_us)) 3401 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_bandwidth, NULL, 3402 tg_cgrp(tg), period_us, quota_us, burst_us); 3403 3404 tg->scx.bw_period_us = period_us; 3405 tg->scx.bw_quota_us = quota_us; 3406 tg->scx.bw_burst_us = burst_us; 3407 3408 percpu_up_read(&scx_cgroup_ops_rwsem); 3409 } 3410 3411 static void scx_cgroup_lock(void) 3412 { 3413 percpu_down_write(&scx_cgroup_ops_rwsem); 3414 cgroup_lock(); 3415 } 3416 3417 static void scx_cgroup_unlock(void) 3418 { 3419 cgroup_unlock(); 3420 percpu_up_write(&scx_cgroup_ops_rwsem); 3421 } 3422 3423 #else /* CONFIG_EXT_GROUP_SCHED */ 3424 3425 static void scx_cgroup_lock(void) {} 3426 static void scx_cgroup_unlock(void) {} 3427 3428 #endif /* CONFIG_EXT_GROUP_SCHED */ 3429 3430 /* 3431 * Omitted operations: 3432 * 3433 * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task 3434 * isn't tied to the CPU at that point. Preemption is implemented by resetting 3435 * the victim task's slice to 0 and triggering reschedule on the target CPU. 3436 * 3437 * - migrate_task_rq: Unnecessary as task to cpu mapping is transient. 3438 * 3439 * - task_fork/dead: We need fork/dead notifications for all tasks regardless of 3440 * their current sched_class. Call them directly from sched core instead. 
3441 */ 3442 DEFINE_SCHED_CLASS(ext) = { 3443 .enqueue_task = enqueue_task_scx, 3444 .dequeue_task = dequeue_task_scx, 3445 .yield_task = yield_task_scx, 3446 .yield_to_task = yield_to_task_scx, 3447 3448 .wakeup_preempt = wakeup_preempt_scx, 3449 3450 .pick_task = pick_task_scx, 3451 3452 .put_prev_task = put_prev_task_scx, 3453 .set_next_task = set_next_task_scx, 3454 3455 .select_task_rq = select_task_rq_scx, 3456 .task_woken = task_woken_scx, 3457 .set_cpus_allowed = set_cpus_allowed_scx, 3458 3459 .rq_online = rq_online_scx, 3460 .rq_offline = rq_offline_scx, 3461 3462 .task_tick = task_tick_scx, 3463 3464 .switching_to = switching_to_scx, 3465 .switched_from = switched_from_scx, 3466 .switched_to = switched_to_scx, 3467 .reweight_task = reweight_task_scx, 3468 .prio_changed = prio_changed_scx, 3469 3470 .update_curr = update_curr_scx, 3471 3472 #ifdef CONFIG_UCLAMP_TASK 3473 .uclamp_enabled = 1, 3474 #endif 3475 }; 3476 3477 static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id) 3478 { 3479 memset(dsq, 0, sizeof(*dsq)); 3480 3481 raw_spin_lock_init(&dsq->lock); 3482 INIT_LIST_HEAD(&dsq->list); 3483 dsq->id = dsq_id; 3484 } 3485 3486 static void free_dsq_irq_workfn(struct irq_work *irq_work) 3487 { 3488 struct llist_node *to_free = llist_del_all(&dsqs_to_free); 3489 struct scx_dispatch_q *dsq, *tmp_dsq; 3490 3491 llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node) 3492 kfree_rcu(dsq, rcu); 3493 } 3494 3495 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn); 3496 3497 static void destroy_dsq(struct scx_sched *sch, u64 dsq_id) 3498 { 3499 struct scx_dispatch_q *dsq; 3500 unsigned long flags; 3501 3502 rcu_read_lock(); 3503 3504 dsq = find_user_dsq(sch, dsq_id); 3505 if (!dsq) 3506 goto out_unlock_rcu; 3507 3508 raw_spin_lock_irqsave(&dsq->lock, flags); 3509 3510 if (dsq->nr) { 3511 scx_error(sch, "attempting to destroy in-use dsq 0x%016llx (nr=%u)", 3512 dsq->id, dsq->nr); 3513 goto out_unlock_dsq; 3514 } 3515 3516 if (rhashtable_remove_fast(&sch->dsq_hash, &dsq->hash_node, 3517 dsq_hash_params)) 3518 goto out_unlock_dsq; 3519 3520 /* 3521 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from 3522 * queueing more tasks. As this function can be called from anywhere, 3523 * freeing is bounced through an irq work to avoid nesting RCU 3524 * operations inside scheduler locks. 3525 */ 3526 dsq->id = SCX_DSQ_INVALID; 3527 if (llist_add(&dsq->free_node, &dsqs_to_free)) 3528 irq_work_queue(&free_dsq_irq_work); 3529 3530 out_unlock_dsq: 3531 raw_spin_unlock_irqrestore(&dsq->lock, flags); 3532 out_unlock_rcu: 3533 rcu_read_unlock(); 3534 } 3535 3536 #ifdef CONFIG_EXT_GROUP_SCHED 3537 static void scx_cgroup_exit(struct scx_sched *sch) 3538 { 3539 struct cgroup_subsys_state *css; 3540 3541 scx_cgroup_enabled = false; 3542 3543 /* 3544 * scx_tg_on/offline() are excluded through cgroup_lock(). If we walk 3545 * cgroups and exit all the inited ones, all online cgroups are exited. 3546 */ 3547 css_for_each_descendant_post(css, &root_task_group.css) { 3548 struct task_group *tg = css_tg(css); 3549 3550 if (!(tg->scx.flags & SCX_TG_INITED)) 3551 continue; 3552 tg->scx.flags &= ~SCX_TG_INITED; 3553 3554 if (!sch->ops.cgroup_exit) 3555 continue; 3556 3557 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL, 3558 css->cgroup); 3559 } 3560 } 3561 3562 static int scx_cgroup_init(struct scx_sched *sch) 3563 { 3564 struct cgroup_subsys_state *css; 3565 int ret; 3566 3567 /* 3568 * scx_tg_on/offline() are excluded through cgroup_lock(). 
If we walk 3569 * cgroups and init, all online cgroups are initialized. 3570 */ 3571 css_for_each_descendant_pre(css, &root_task_group.css) { 3572 struct task_group *tg = css_tg(css); 3573 struct scx_cgroup_init_args args = { 3574 .weight = tg->scx.weight, 3575 .bw_period_us = tg->scx.bw_period_us, 3576 .bw_quota_us = tg->scx.bw_quota_us, 3577 .bw_burst_us = tg->scx.bw_burst_us, 3578 }; 3579 3580 if ((tg->scx.flags & 3581 (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE) 3582 continue; 3583 3584 if (!sch->ops.cgroup_init) { 3585 tg->scx.flags |= SCX_TG_INITED; 3586 continue; 3587 } 3588 3589 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init, NULL, 3590 css->cgroup, &args); 3591 if (ret) { 3592 css_put(css); 3593 scx_error(sch, "ops.cgroup_init() failed (%d)", ret); 3594 return ret; 3595 } 3596 tg->scx.flags |= SCX_TG_INITED; 3597 } 3598 3599 WARN_ON_ONCE(scx_cgroup_enabled); 3600 scx_cgroup_enabled = true; 3601 3602 return 0; 3603 } 3604 3605 #else 3606 static void scx_cgroup_exit(struct scx_sched *sch) {} 3607 static int scx_cgroup_init(struct scx_sched *sch) { return 0; } 3608 #endif 3609 3610 3611 /******************************************************************************** 3612 * Sysfs interface and ops enable/disable. 3613 */ 3614 3615 #define SCX_ATTR(_name) \ 3616 static struct kobj_attribute scx_attr_##_name = { \ 3617 .attr = { .name = __stringify(_name), .mode = 0444 }, \ 3618 .show = scx_attr_##_name##_show, \ 3619 } 3620 3621 static ssize_t scx_attr_state_show(struct kobject *kobj, 3622 struct kobj_attribute *ka, char *buf) 3623 { 3624 return sysfs_emit(buf, "%s\n", scx_enable_state_str[scx_enable_state()]); 3625 } 3626 SCX_ATTR(state); 3627 3628 static ssize_t scx_attr_switch_all_show(struct kobject *kobj, 3629 struct kobj_attribute *ka, char *buf) 3630 { 3631 return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all)); 3632 } 3633 SCX_ATTR(switch_all); 3634 3635 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj, 3636 struct kobj_attribute *ka, char *buf) 3637 { 3638 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected)); 3639 } 3640 SCX_ATTR(nr_rejected); 3641 3642 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj, 3643 struct kobj_attribute *ka, char *buf) 3644 { 3645 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq)); 3646 } 3647 SCX_ATTR(hotplug_seq); 3648 3649 static ssize_t scx_attr_enable_seq_show(struct kobject *kobj, 3650 struct kobj_attribute *ka, char *buf) 3651 { 3652 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq)); 3653 } 3654 SCX_ATTR(enable_seq); 3655 3656 static struct attribute *scx_global_attrs[] = { 3657 &scx_attr_state.attr, 3658 &scx_attr_switch_all.attr, 3659 &scx_attr_nr_rejected.attr, 3660 &scx_attr_hotplug_seq.attr, 3661 &scx_attr_enable_seq.attr, 3662 NULL, 3663 }; 3664 3665 static const struct attribute_group scx_global_attr_group = { 3666 .attrs = scx_global_attrs, 3667 }; 3668 3669 static void free_exit_info(struct scx_exit_info *ei); 3670 3671 static void scx_sched_free_rcu_work(struct work_struct *work) 3672 { 3673 struct rcu_work *rcu_work = to_rcu_work(work); 3674 struct scx_sched *sch = container_of(rcu_work, struct scx_sched, rcu_work); 3675 struct rhashtable_iter rht_iter; 3676 struct scx_dispatch_q *dsq; 3677 int node; 3678 3679 irq_work_sync(&sch->error_irq_work); 3680 kthread_destroy_worker(sch->helper); 3681 3682 free_percpu(sch->pcpu); 3683 3684 for_each_node_state(node, N_POSSIBLE) 3685 kfree(sch->global_dsqs[node]); 3686 kfree(sch->global_dsqs); 3687 
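/*
 * Destroy any user DSQs still in the hash table. The walk can return -EAGAIN
 * if the table is resized concurrently, so restart until the iteration
 * completes, then free the table itself.
 */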
3688 rhashtable_walk_enter(&sch->dsq_hash, &rht_iter); 3689 do { 3690 rhashtable_walk_start(&rht_iter); 3691 3692 while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq)) 3693 destroy_dsq(sch, dsq->id); 3694 3695 rhashtable_walk_stop(&rht_iter); 3696 } while (dsq == ERR_PTR(-EAGAIN)); 3697 rhashtable_walk_exit(&rht_iter); 3698 3699 rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL); 3700 free_exit_info(sch->exit_info); 3701 kfree(sch); 3702 } 3703 3704 static void scx_kobj_release(struct kobject *kobj) 3705 { 3706 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj); 3707 3708 INIT_RCU_WORK(&sch->rcu_work, scx_sched_free_rcu_work); 3709 queue_rcu_work(system_unbound_wq, &sch->rcu_work); 3710 } 3711 3712 static ssize_t scx_attr_ops_show(struct kobject *kobj, 3713 struct kobj_attribute *ka, char *buf) 3714 { 3715 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj); 3716 3717 return sysfs_emit(buf, "%s\n", sch->ops.name); 3718 } 3719 SCX_ATTR(ops); 3720 3721 #define scx_attr_event_show(buf, at, events, kind) ({ \ 3722 sysfs_emit_at(buf, at, "%s %llu\n", #kind, (events)->kind); \ 3723 }) 3724 3725 static ssize_t scx_attr_events_show(struct kobject *kobj, 3726 struct kobj_attribute *ka, char *buf) 3727 { 3728 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj); 3729 struct scx_event_stats events; 3730 int at = 0; 3731 3732 scx_read_events(sch, &events); 3733 at += scx_attr_event_show(buf, at, &events, SCX_EV_SELECT_CPU_FALLBACK); 3734 at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE); 3735 at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_KEEP_LAST); 3736 at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_EXITING); 3737 at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED); 3738 at += scx_attr_event_show(buf, at, &events, SCX_EV_REFILL_SLICE_DFL); 3739 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DURATION); 3740 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DISPATCH); 3741 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_ACTIVATE); 3742 return at; 3743 } 3744 SCX_ATTR(events); 3745 3746 static struct attribute *scx_sched_attrs[] = { 3747 &scx_attr_ops.attr, 3748 &scx_attr_events.attr, 3749 NULL, 3750 }; 3751 ATTRIBUTE_GROUPS(scx_sched); 3752 3753 static const struct kobj_type scx_ktype = { 3754 .release = scx_kobj_release, 3755 .sysfs_ops = &kobj_sysfs_ops, 3756 .default_groups = scx_sched_groups, 3757 }; 3758 3759 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) 3760 { 3761 const struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj); 3762 3763 return add_uevent_var(env, "SCXOPS=%s", sch->ops.name); 3764 } 3765 3766 static const struct kset_uevent_ops scx_uevent_ops = { 3767 .uevent = scx_uevent, 3768 }; 3769 3770 /* 3771 * Used by sched_fork() and __setscheduler_prio() to pick the matching 3772 * sched_class. dl/rt are already handled. 
3773 */ 3774 bool task_should_scx(int policy) 3775 { 3776 if (!scx_enabled() || unlikely(scx_enable_state() == SCX_DISABLING)) 3777 return false; 3778 if (READ_ONCE(scx_switching_all)) 3779 return true; 3780 return policy == SCHED_EXT; 3781 } 3782 3783 bool scx_allow_ttwu_queue(const struct task_struct *p) 3784 { 3785 struct scx_sched *sch; 3786 3787 if (!scx_enabled()) 3788 return true; 3789 3790 sch = rcu_dereference_sched(scx_root); 3791 if (unlikely(!sch)) 3792 return true; 3793 3794 if (sch->ops.flags & SCX_OPS_ALLOW_QUEUED_WAKEUP) 3795 return true; 3796 3797 if (unlikely(p->sched_class != &ext_sched_class)) 3798 return true; 3799 3800 return false; 3801 } 3802 3803 /** 3804 * handle_lockup - sched_ext common lockup handler 3805 * @fmt: format string 3806 * 3807 * Called on system stall or lockup condition and initiates abort of sched_ext 3808 * if enabled, which may resolve the reported lockup. 3809 * 3810 * Returns %true if sched_ext is enabled and abort was initiated, which may 3811 * resolve the lockup. %false if sched_ext is not enabled or abort was already 3812 * initiated by someone else. 3813 */ 3814 static __printf(1, 2) bool handle_lockup(const char *fmt, ...) 3815 { 3816 struct scx_sched *sch; 3817 va_list args; 3818 bool ret; 3819 3820 guard(rcu)(); 3821 3822 sch = rcu_dereference(scx_root); 3823 if (unlikely(!sch)) 3824 return false; 3825 3826 switch (scx_enable_state()) { 3827 case SCX_ENABLING: 3828 case SCX_ENABLED: 3829 va_start(args, fmt); 3830 ret = scx_verror(sch, fmt, args); 3831 va_end(args); 3832 return ret; 3833 default: 3834 return false; 3835 } 3836 } 3837 3838 /** 3839 * scx_rcu_cpu_stall - sched_ext RCU CPU stall handler 3840 * 3841 * While there are various reasons why RCU CPU stalls can occur on a system 3842 * that may not be caused by the current BPF scheduler, try kicking out the 3843 * current scheduler in an attempt to recover the system to a good state before 3844 * issuing panics. 3845 * 3846 * Returns %true if sched_ext is enabled and abort was initiated, which may 3847 * resolve the reported RCU stall. %false if sched_ext is not enabled or someone 3848 * else already initiated abort. 3849 */ 3850 bool scx_rcu_cpu_stall(void) 3851 { 3852 return handle_lockup("RCU CPU stall detected!"); 3853 } 3854 3855 /** 3856 * scx_softlockup - sched_ext softlockup handler 3857 * @dur_s: number of seconds the CPU has been stuck due to soft lockup 3858 * 3859 * On some multi-socket setups (e.g. 2x Intel 8480c), the BPF scheduler can 3860 * live-lock the system by making many CPUs target the same DSQ to the point 3861 * where soft-lockup detection triggers. This function is called from the 3862 * soft-lockup watchdog when the triggering point is close and tries to unjam 3863 * the system by aborting the BPF scheduler. 3864 */ 3865 void scx_softlockup(u32 dur_s) 3866 { 3867 if (!handle_lockup("soft lockup - CPU %d stuck for %us", smp_processor_id(), dur_s)) 3868 return; 3869 3870 printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU %d stuck for %us, disabling BPF scheduler\n", 3871 smp_processor_id(), dur_s); 3872 } 3873 3874 /** 3875 * scx_hardlockup - sched_ext hardlockup handler 3876 * 3877 * A poorly behaving BPF scheduler can trigger a hard lockup by e.g. putting 3878 * numerous affinitized tasks in a single queue and directing all CPUs at it. 3879 * Try kicking out the current scheduler in an attempt to recover the system to 3880 * a good state before taking more drastic actions.
3881 * 3882 * Returns %true if sched_ext is enabled and abort was initiated, which may 3883 * resolve the reported hard lockup. %false if sched_ext is not enabled or 3884 * someone else already initiated abort. 3885 */ 3886 bool scx_hardlockup(int cpu) 3887 { 3888 if (!handle_lockup("hard lockup - CPU %d", cpu)) 3889 return false; 3890 3891 printk_deferred(KERN_ERR "sched_ext: Hard lockup - CPU %d, disabling BPF scheduler\n", 3892 cpu); 3893 return true; 3894 } 3895 3896 static u32 bypass_lb_cpu(struct scx_sched *sch, struct rq *rq, 3897 struct cpumask *donee_mask, struct cpumask *resched_mask, 3898 u32 nr_donor_target, u32 nr_donee_target) 3899 { 3900 struct scx_dispatch_q *donor_dsq = &rq->scx.bypass_dsq; 3901 struct task_struct *p, *n; 3902 struct scx_dsq_list_node cursor = INIT_DSQ_LIST_CURSOR(cursor, 0, 0); 3903 s32 delta = READ_ONCE(donor_dsq->nr) - nr_donor_target; 3904 u32 nr_balanced = 0, min_delta_us; 3905 3906 /* 3907 * All we want to guarantee is reasonable forward progress. No reason to 3908 * fine-tune. Assuming every task on @donor_dsq runs its full slice, 3909 * consider offloading iff the total queued duration is over the 3910 * threshold. 3911 */ 3912 min_delta_us = scx_bypass_lb_intv_us / SCX_BYPASS_LB_MIN_DELTA_DIV; 3913 if (delta < DIV_ROUND_UP(min_delta_us, scx_slice_bypass_us)) 3914 return 0; 3915 3916 raw_spin_rq_lock_irq(rq); 3917 raw_spin_lock(&donor_dsq->lock); 3918 list_add(&cursor.node, &donor_dsq->list); 3919 resume: 3920 n = container_of(&cursor, struct task_struct, scx.dsq_list); 3921 n = nldsq_next_task(donor_dsq, n, false); 3922 3923 while ((p = n)) { 3924 struct rq *donee_rq; 3925 struct scx_dispatch_q *donee_dsq; 3926 int donee; 3927 3928 n = nldsq_next_task(donor_dsq, n, false); 3929 3930 if (donor_dsq->nr <= nr_donor_target) 3931 break; 3932 3933 if (cpumask_empty(donee_mask)) 3934 break; 3935 3936 donee = cpumask_any_and_distribute(donee_mask, p->cpus_ptr); 3937 if (donee >= nr_cpu_ids) 3938 continue; 3939 3940 donee_rq = cpu_rq(donee); 3941 donee_dsq = &donee_rq->scx.bypass_dsq; 3942 3943 /* 3944 * $p's rq is not locked but $p's DSQ lock protects its 3945 * scheduling properties, making this test safe. 3946 */ 3947 if (!task_can_run_on_remote_rq(sch, p, donee_rq, false)) 3948 continue; 3949 3950 /* 3951 * Moving $p from one non-local DSQ to another. The source rq 3952 * and DSQ are already locked. Do an abbreviated dequeue and 3953 * then perform enqueue without unlocking $donor_dsq. 3954 * 3955 * We don't want to drop and reacquire the lock on each 3956 * iteration as @donor_dsq can be very long and potentially 3957 * highly contended. Donee DSQs are less likely to be contended. 3958 * The nested locking is safe as only this LB moves tasks 3959 * between bypass DSQs. 3960 */ 3961 dispatch_dequeue_locked(p, donor_dsq); 3962 dispatch_enqueue(sch, donee_dsq, p, SCX_ENQ_NESTED); 3963 3964 /* 3965 * $donee might have been idle and need to be woken up. No need 3966 * to be clever. Kick every CPU that receives tasks.
3967 */ 3968 cpumask_set_cpu(donee, resched_mask); 3969 3970 if (READ_ONCE(donee_dsq->nr) >= nr_donee_target) 3971 cpumask_clear_cpu(donee, donee_mask); 3972 3973 nr_balanced++; 3974 if (!(nr_balanced % SCX_BYPASS_LB_BATCH) && n) { 3975 list_move_tail(&cursor.node, &n->scx.dsq_list.node); 3976 raw_spin_unlock(&donor_dsq->lock); 3977 raw_spin_rq_unlock_irq(rq); 3978 cpu_relax(); 3979 raw_spin_rq_lock_irq(rq); 3980 raw_spin_lock(&donor_dsq->lock); 3981 goto resume; 3982 } 3983 } 3984 3985 list_del_init(&cursor.node); 3986 raw_spin_unlock(&donor_dsq->lock); 3987 raw_spin_rq_unlock_irq(rq); 3988 3989 return nr_balanced; 3990 } 3991 3992 static void bypass_lb_node(struct scx_sched *sch, int node) 3993 { 3994 const struct cpumask *node_mask = cpumask_of_node(node); 3995 struct cpumask *donee_mask = scx_bypass_lb_donee_cpumask; 3996 struct cpumask *resched_mask = scx_bypass_lb_resched_cpumask; 3997 u32 nr_tasks = 0, nr_cpus = 0, nr_balanced = 0; 3998 u32 nr_target, nr_donor_target; 3999 u32 before_min = U32_MAX, before_max = 0; 4000 u32 after_min = U32_MAX, after_max = 0; 4001 int cpu; 4002 4003 /* count the target tasks and CPUs */ 4004 for_each_cpu_and(cpu, cpu_online_mask, node_mask) { 4005 u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr); 4006 4007 nr_tasks += nr; 4008 nr_cpus++; 4009 4010 before_min = min(nr, before_min); 4011 before_max = max(nr, before_max); 4012 } 4013 4014 if (!nr_cpus) 4015 return; 4016 4017 /* 4018 * We don't want CPUs to have more than $nr_donor_target tasks and 4019 * balancing to fill donee CPUs upto $nr_target. Once targets are 4020 * calculated, find the donee CPUs. 4021 */ 4022 nr_target = DIV_ROUND_UP(nr_tasks, nr_cpus); 4023 nr_donor_target = DIV_ROUND_UP(nr_target * SCX_BYPASS_LB_DONOR_PCT, 100); 4024 4025 cpumask_clear(donee_mask); 4026 for_each_cpu_and(cpu, cpu_online_mask, node_mask) { 4027 if (READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr) < nr_target) 4028 cpumask_set_cpu(cpu, donee_mask); 4029 } 4030 4031 /* iterate !donee CPUs and see if they should be offloaded */ 4032 cpumask_clear(resched_mask); 4033 for_each_cpu_and(cpu, cpu_online_mask, node_mask) { 4034 struct rq *rq = cpu_rq(cpu); 4035 struct scx_dispatch_q *donor_dsq = &rq->scx.bypass_dsq; 4036 4037 if (cpumask_empty(donee_mask)) 4038 break; 4039 if (cpumask_test_cpu(cpu, donee_mask)) 4040 continue; 4041 if (READ_ONCE(donor_dsq->nr) <= nr_donor_target) 4042 continue; 4043 4044 nr_balanced += bypass_lb_cpu(sch, rq, donee_mask, resched_mask, 4045 nr_donor_target, nr_target); 4046 } 4047 4048 for_each_cpu(cpu, resched_mask) 4049 resched_cpu(cpu); 4050 4051 for_each_cpu_and(cpu, cpu_online_mask, node_mask) { 4052 u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr); 4053 4054 after_min = min(nr, after_min); 4055 after_max = max(nr, after_max); 4056 4057 } 4058 4059 trace_sched_ext_bypass_lb(node, nr_cpus, nr_tasks, nr_balanced, 4060 before_min, before_max, after_min, after_max); 4061 } 4062 4063 /* 4064 * In bypass mode, all tasks are put on the per-CPU bypass DSQs. If the machine 4065 * is over-saturated and the BPF scheduler skewed tasks into few CPUs, some 4066 * bypass DSQs can be overloaded. If there are enough tasks to saturate other 4067 * lightly loaded CPUs, such imbalance can lead to very high execution latency 4068 * on the overloaded CPUs and thus to hung tasks and RCU stalls. To avoid such 4069 * outcomes, a simple load balancing mechanism is implemented by the following 4070 * timer which runs periodically while bypass mode is in effect. 
4071 */ 4072 static void scx_bypass_lb_timerfn(struct timer_list *timer) 4073 { 4074 struct scx_sched *sch; 4075 int node; 4076 u32 intv_us; 4077 4078 sch = rcu_dereference_all(scx_root); 4079 if (unlikely(!sch) || !READ_ONCE(scx_bypass_depth)) 4080 return; 4081 4082 for_each_node_with_cpus(node) 4083 bypass_lb_node(sch, node); 4084 4085 intv_us = READ_ONCE(scx_bypass_lb_intv_us); 4086 if (intv_us) 4087 mod_timer(timer, jiffies + usecs_to_jiffies(intv_us)); 4088 } 4089 4090 static DEFINE_TIMER(scx_bypass_lb_timer, scx_bypass_lb_timerfn); 4091 4092 /** 4093 * scx_bypass - [Un]bypass scx_ops and guarantee forward progress 4094 * @bypass: true for bypass, false for unbypass 4095 * 4096 * Bypassing guarantees that all runnable tasks make forward progress without 4097 * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might 4098 * be held by tasks that the BPF scheduler is forgetting to run, which 4099 * unfortunately also excludes toggling the static branches. 4100 * 4101 * Let's work around by overriding a couple ops and modifying behaviors based on 4102 * the DISABLING state and then cycling the queued tasks through dequeue/enqueue 4103 * to force global FIFO scheduling. 4104 * 4105 * - ops.select_cpu() is ignored and the default select_cpu() is used. 4106 * 4107 * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order. 4108 * %SCX_OPS_ENQ_LAST is also ignored. 4109 * 4110 * - ops.dispatch() is ignored. 4111 * 4112 * - balance_one() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice 4113 * can't be trusted. Whenever a tick triggers, the running task is rotated to 4114 * the tail of the queue with core_sched_at touched. 4115 * 4116 * - pick_next_task() suppresses zero slice warning. 4117 * 4118 * - scx_kick_cpu() is disabled to avoid irq_work malfunction during PM 4119 * operations. 4120 * 4121 * - scx_prio_less() reverts to the default core_sched_at order. 4122 */ 4123 static void scx_bypass(bool bypass) 4124 { 4125 static DEFINE_RAW_SPINLOCK(bypass_lock); 4126 static unsigned long bypass_timestamp; 4127 struct scx_sched *sch; 4128 unsigned long flags; 4129 int cpu; 4130 4131 raw_spin_lock_irqsave(&bypass_lock, flags); 4132 sch = rcu_dereference_bh(scx_root); 4133 4134 if (bypass) { 4135 u32 intv_us; 4136 4137 WRITE_ONCE(scx_bypass_depth, scx_bypass_depth + 1); 4138 WARN_ON_ONCE(scx_bypass_depth <= 0); 4139 if (scx_bypass_depth != 1) 4140 goto unlock; 4141 WRITE_ONCE(scx_slice_dfl, scx_slice_bypass_us * NSEC_PER_USEC); 4142 bypass_timestamp = ktime_get_ns(); 4143 if (sch) 4144 scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1); 4145 4146 intv_us = READ_ONCE(scx_bypass_lb_intv_us); 4147 if (intv_us && !timer_pending(&scx_bypass_lb_timer)) { 4148 scx_bypass_lb_timer.expires = 4149 jiffies + usecs_to_jiffies(intv_us); 4150 add_timer_global(&scx_bypass_lb_timer); 4151 } 4152 } else { 4153 WRITE_ONCE(scx_bypass_depth, scx_bypass_depth - 1); 4154 WARN_ON_ONCE(scx_bypass_depth < 0); 4155 if (scx_bypass_depth != 0) 4156 goto unlock; 4157 WRITE_ONCE(scx_slice_dfl, SCX_SLICE_DFL); 4158 if (sch) 4159 scx_add_event(sch, SCX_EV_BYPASS_DURATION, 4160 ktime_get_ns() - bypass_timestamp); 4161 } 4162 4163 /* 4164 * No task property is changing. We just need to make sure all currently 4165 * queued tasks are re-queued according to the new scx_rq_bypassing() 4166 * state. As an optimization, walk each rq's runnable_list instead of 4167 * the scx_tasks list. 4168 * 4169 * This function can't trust the scheduler and thus can't use 4170 * cpus_read_lock(). 
Walk all possible CPUs instead of online. 4171 */ 4172 for_each_possible_cpu(cpu) { 4173 struct rq *rq = cpu_rq(cpu); 4174 struct task_struct *p, *n; 4175 4176 raw_spin_rq_lock(rq); 4177 4178 if (bypass) { 4179 WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING); 4180 rq->scx.flags |= SCX_RQ_BYPASSING; 4181 } else { 4182 WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING)); 4183 rq->scx.flags &= ~SCX_RQ_BYPASSING; 4184 } 4185 4186 /* 4187 * We need to guarantee that no tasks are on the BPF scheduler 4188 * while bypassing. Either we see enabled or the enable path 4189 * sees scx_rq_bypassing() before moving tasks to SCX. 4190 */ 4191 if (!scx_enabled()) { 4192 raw_spin_rq_unlock(rq); 4193 continue; 4194 } 4195 4196 /* 4197 * The use of list_for_each_entry_safe_reverse() is required 4198 * because each task is going to be removed from and added back 4199 * to the runnable_list during iteration. Because they're added 4200 * to the tail of the list, safe reverse iteration can still 4201 * visit all nodes. 4202 */ 4203 list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list, 4204 scx.runnable_node) { 4205 /* cycling deq/enq is enough, see the function comment */ 4206 scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_MOVE) { 4207 /* nothing */ ; 4208 } 4209 } 4210 4211 /* resched to restore ticks and idle state */ 4212 if (cpu_online(cpu) || cpu == smp_processor_id()) 4213 resched_curr(rq); 4214 4215 raw_spin_rq_unlock(rq); 4216 } 4217 4218 unlock: 4219 raw_spin_unlock_irqrestore(&bypass_lock, flags); 4220 } 4221 4222 static void free_exit_info(struct scx_exit_info *ei) 4223 { 4224 kvfree(ei->dump); 4225 kfree(ei->msg); 4226 kfree(ei->bt); 4227 kfree(ei); 4228 } 4229 4230 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len) 4231 { 4232 struct scx_exit_info *ei; 4233 4234 ei = kzalloc(sizeof(*ei), GFP_KERNEL); 4235 if (!ei) 4236 return NULL; 4237 4238 ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL); 4239 ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL); 4240 ei->dump = kvzalloc(exit_dump_len, GFP_KERNEL); 4241 4242 if (!ei->bt || !ei->msg || !ei->dump) { 4243 free_exit_info(ei); 4244 return NULL; 4245 } 4246 4247 return ei; 4248 } 4249 4250 static const char *scx_exit_reason(enum scx_exit_kind kind) 4251 { 4252 switch (kind) { 4253 case SCX_EXIT_UNREG: 4254 return "unregistered from user space"; 4255 case SCX_EXIT_UNREG_BPF: 4256 return "unregistered from BPF"; 4257 case SCX_EXIT_UNREG_KERN: 4258 return "unregistered from the main kernel"; 4259 case SCX_EXIT_SYSRQ: 4260 return "disabled by sysrq-S"; 4261 case SCX_EXIT_ERROR: 4262 return "runtime error"; 4263 case SCX_EXIT_ERROR_BPF: 4264 return "scx_bpf_error"; 4265 case SCX_EXIT_ERROR_STALL: 4266 return "runnable task stall"; 4267 default: 4268 return "<UNKNOWN>"; 4269 } 4270 } 4271 4272 static void free_kick_syncs(void) 4273 { 4274 int cpu; 4275 4276 for_each_possible_cpu(cpu) { 4277 struct scx_kick_syncs **ksyncs = per_cpu_ptr(&scx_kick_syncs, cpu); 4278 struct scx_kick_syncs *to_free; 4279 4280 to_free = rcu_replace_pointer(*ksyncs, NULL, true); 4281 if (to_free) 4282 kvfree_rcu(to_free, rcu); 4283 } 4284 } 4285 4286 static void scx_disable_workfn(struct kthread_work *work) 4287 { 4288 struct scx_sched *sch = container_of(work, struct scx_sched, disable_work); 4289 struct scx_exit_info *ei = sch->exit_info; 4290 struct scx_task_iter sti; 4291 struct task_struct *p; 4292 int kind, cpu; 4293 4294 kind = atomic_read(&sch->exit_kind); 4295 while (true) { 4296 if (kind == SCX_EXIT_DONE) /* already disabled? 
*/ 4297 return; 4298 WARN_ON_ONCE(kind == SCX_EXIT_NONE); 4299 if (atomic_try_cmpxchg(&sch->exit_kind, &kind, SCX_EXIT_DONE)) 4300 break; 4301 } 4302 ei->kind = kind; 4303 ei->reason = scx_exit_reason(ei->kind); 4304 4305 /* guarantee forward progress by bypassing scx_ops */ 4306 scx_bypass(true); 4307 WRITE_ONCE(scx_aborting, false); 4308 4309 switch (scx_set_enable_state(SCX_DISABLING)) { 4310 case SCX_DISABLING: 4311 WARN_ONCE(true, "sched_ext: duplicate disabling instance?"); 4312 break; 4313 case SCX_DISABLED: 4314 pr_warn("sched_ext: ops error detected without ops (%s)\n", 4315 sch->exit_info->msg); 4316 WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING); 4317 goto done; 4318 default: 4319 break; 4320 } 4321 4322 /* 4323 * Here, every runnable task is guaranteed to make forward progress and 4324 * we can safely use blocking synchronization constructs. Actually 4325 * disable ops. 4326 */ 4327 mutex_lock(&scx_enable_mutex); 4328 4329 static_branch_disable(&__scx_switched_all); 4330 WRITE_ONCE(scx_switching_all, false); 4331 4332 /* 4333 * Shut down cgroup support before tasks so that the cgroup attach path 4334 * doesn't race against scx_exit_task(). 4335 */ 4336 scx_cgroup_lock(); 4337 scx_cgroup_exit(sch); 4338 scx_cgroup_unlock(); 4339 4340 /* 4341 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones 4342 * must be switched out and exited synchronously. 4343 */ 4344 percpu_down_write(&scx_fork_rwsem); 4345 4346 scx_init_task_enabled = false; 4347 4348 scx_task_iter_start(&sti); 4349 while ((p = scx_task_iter_next_locked(&sti))) { 4350 unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 4351 const struct sched_class *old_class = p->sched_class; 4352 const struct sched_class *new_class = scx_setscheduler_class(p); 4353 4354 update_rq_clock(task_rq(p)); 4355 4356 if (old_class != new_class) 4357 queue_flags |= DEQUEUE_CLASS; 4358 4359 scoped_guard (sched_change, p, queue_flags) { 4360 p->sched_class = new_class; 4361 } 4362 4363 scx_exit_task(p); 4364 } 4365 scx_task_iter_stop(&sti); 4366 percpu_up_write(&scx_fork_rwsem); 4367 4368 /* 4369 * Invalidate all the rq clocks to prevent getting outdated 4370 * rq clocks from a previous scx scheduler. 4371 */ 4372 for_each_possible_cpu(cpu) { 4373 struct rq *rq = cpu_rq(cpu); 4374 scx_rq_clock_invalidate(rq); 4375 } 4376 4377 /* no task is on scx, turn off all the switches and flush in-progress calls */ 4378 static_branch_disable(&__scx_enabled); 4379 bitmap_zero(sch->has_op, SCX_OPI_END); 4380 scx_idle_disable(); 4381 synchronize_rcu(); 4382 4383 if (ei->kind >= SCX_EXIT_ERROR) { 4384 pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n", 4385 sch->ops.name, ei->reason); 4386 4387 if (ei->msg[0] != '\0') 4388 pr_err("sched_ext: %s: %s\n", sch->ops.name, ei->msg); 4389 #ifdef CONFIG_STACKTRACE 4390 stack_trace_print(ei->bt, ei->bt_len, 2); 4391 #endif 4392 } else { 4393 pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n", 4394 sch->ops.name, ei->reason); 4395 } 4396 4397 if (sch->ops.exit) 4398 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, exit, NULL, ei); 4399 4400 cancel_delayed_work_sync(&scx_watchdog_work); 4401 4402 /* 4403 * scx_root clearing must be inside cpus_read_lock(). See 4404 * handle_hotplug(). 4405 */ 4406 cpus_read_lock(); 4407 RCU_INIT_POINTER(scx_root, NULL); 4408 cpus_read_unlock(); 4409 4410 /* 4411 * Delete the kobject from the hierarchy synchronously. 
Otherwise, sysfs 4412 * could observe an object of the same name still in the hierarchy when 4413 * the next scheduler is loaded. 4414 */ 4415 kobject_del(&sch->kobj); 4416 4417 free_percpu(scx_dsp_ctx); 4418 scx_dsp_ctx = NULL; 4419 scx_dsp_max_batch = 0; 4420 free_kick_syncs(); 4421 4422 if (scx_bypassed_for_enable) { 4423 scx_bypassed_for_enable = false; 4424 scx_bypass(false); 4425 } 4426 4427 mutex_unlock(&scx_enable_mutex); 4428 4429 WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING); 4430 done: 4431 scx_bypass(false); 4432 } 4433 4434 /* 4435 * Claim the exit on @sch. The caller must ensure that the helper kthread work 4436 * is kicked before the current task can be preempted. Once exit_kind is 4437 * claimed, scx_error() can no longer trigger, so if the current task gets 4438 * preempted and the BPF scheduler fails to schedule it back, the helper work 4439 * will never be kicked and the whole system can wedge. 4440 */ 4441 static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind) 4442 { 4443 int none = SCX_EXIT_NONE; 4444 4445 lockdep_assert_preemption_disabled(); 4446 4447 if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind)) 4448 return false; 4449 4450 /* 4451 * Some CPUs may be trapped in the dispatch paths. Set the aborting 4452 * flag to break potential live-lock scenarios, ensuring we can 4453 * successfully reach scx_bypass(). 4454 */ 4455 WRITE_ONCE(scx_aborting, true); 4456 return true; 4457 } 4458 4459 static void scx_disable(enum scx_exit_kind kind) 4460 { 4461 struct scx_sched *sch; 4462 4463 if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE)) 4464 kind = SCX_EXIT_ERROR; 4465 4466 rcu_read_lock(); 4467 sch = rcu_dereference(scx_root); 4468 if (sch) { 4469 guard(preempt)(); 4470 scx_claim_exit(sch, kind); 4471 kthread_queue_work(sch->helper, &sch->disable_work); 4472 } 4473 rcu_read_unlock(); 4474 } 4475 4476 static void dump_newline(struct seq_buf *s) 4477 { 4478 trace_sched_ext_dump(""); 4479 4480 /* @s may be zero sized and seq_buf triggers WARN if so */ 4481 if (s->size) 4482 seq_buf_putc(s, '\n'); 4483 } 4484 4485 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...) 
4486 { 4487 va_list args; 4488 4489 #ifdef CONFIG_TRACEPOINTS 4490 if (trace_sched_ext_dump_enabled()) { 4491 /* protected by scx_dump_state()::dump_lock */ 4492 static char line_buf[SCX_EXIT_MSG_LEN]; 4493 4494 va_start(args, fmt); 4495 vscnprintf(line_buf, sizeof(line_buf), fmt, args); 4496 va_end(args); 4497 4498 trace_sched_ext_dump(line_buf); 4499 } 4500 #endif 4501 /* @s may be zero sized and seq_buf triggers WARN if so */ 4502 if (s->size) { 4503 va_start(args, fmt); 4504 seq_buf_vprintf(s, fmt, args); 4505 va_end(args); 4506 4507 seq_buf_putc(s, '\n'); 4508 } 4509 } 4510 4511 static void dump_stack_trace(struct seq_buf *s, const char *prefix, 4512 const unsigned long *bt, unsigned int len) 4513 { 4514 unsigned int i; 4515 4516 for (i = 0; i < len; i++) 4517 dump_line(s, "%s%pS", prefix, (void *)bt[i]); 4518 } 4519 4520 static void ops_dump_init(struct seq_buf *s, const char *prefix) 4521 { 4522 struct scx_dump_data *dd = &scx_dump_data; 4523 4524 lockdep_assert_irqs_disabled(); 4525 4526 dd->cpu = smp_processor_id(); /* allow scx_bpf_dump() */ 4527 dd->first = true; 4528 dd->cursor = 0; 4529 dd->s = s; 4530 dd->prefix = prefix; 4531 } 4532 4533 static void ops_dump_flush(void) 4534 { 4535 struct scx_dump_data *dd = &scx_dump_data; 4536 char *line = dd->buf.line; 4537 4538 if (!dd->cursor) 4539 return; 4540 4541 /* 4542 * There's something to flush and this is the first line. Insert a blank 4543 * line to distinguish ops dump. 4544 */ 4545 if (dd->first) { 4546 dump_newline(dd->s); 4547 dd->first = false; 4548 } 4549 4550 /* 4551 * There may be multiple lines in $line. Scan and emit each line 4552 * separately. 4553 */ 4554 while (true) { 4555 char *end = line; 4556 char c; 4557 4558 while (*end != '\n' && *end != '\0') 4559 end++; 4560 4561 /* 4562 * If $line overflowed, it may not have newline at the end. 4563 * Always emit with a newline. 
4564 */ 4565 c = *end; 4566 *end = '\0'; 4567 dump_line(dd->s, "%s%s", dd->prefix, line); 4568 if (c == '\0') 4569 break; 4570 4571 /* move to the next line */ 4572 end++; 4573 if (*end == '\0') 4574 break; 4575 line = end; 4576 } 4577 4578 dd->cursor = 0; 4579 } 4580 4581 static void ops_dump_exit(void) 4582 { 4583 ops_dump_flush(); 4584 scx_dump_data.cpu = -1; 4585 } 4586 4587 static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx, 4588 struct task_struct *p, char marker) 4589 { 4590 static unsigned long bt[SCX_EXIT_BT_LEN]; 4591 struct scx_sched *sch = scx_root; 4592 char dsq_id_buf[19] = "(n/a)"; 4593 unsigned long ops_state = atomic_long_read(&p->scx.ops_state); 4594 unsigned int bt_len = 0; 4595 4596 if (p->scx.dsq) 4597 scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx", 4598 (unsigned long long)p->scx.dsq->id); 4599 4600 dump_newline(s); 4601 dump_line(s, " %c%c %s[%d] %+ldms", 4602 marker, task_state_to_char(p), p->comm, p->pid, 4603 jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies)); 4604 dump_line(s, " scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu", 4605 scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK, 4606 p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK, 4607 ops_state >> SCX_OPSS_QSEQ_SHIFT); 4608 dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s", 4609 p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf); 4610 dump_line(s, " dsq_vtime=%llu slice=%llu weight=%u", 4611 p->scx.dsq_vtime, p->scx.slice, p->scx.weight); 4612 dump_line(s, " cpus=%*pb no_mig=%u", cpumask_pr_args(p->cpus_ptr), 4613 p->migration_disabled); 4614 4615 if (SCX_HAS_OP(sch, dump_task)) { 4616 ops_dump_init(s, " "); 4617 SCX_CALL_OP(sch, SCX_KF_REST, dump_task, NULL, dctx, p); 4618 ops_dump_exit(); 4619 } 4620 4621 #ifdef CONFIG_STACKTRACE 4622 bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1); 4623 #endif 4624 if (bt_len) { 4625 dump_newline(s); 4626 dump_stack_trace(s, " ", bt, bt_len); 4627 } 4628 } 4629 4630 static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len) 4631 { 4632 static DEFINE_SPINLOCK(dump_lock); 4633 static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n"; 4634 struct scx_sched *sch = scx_root; 4635 struct scx_dump_ctx dctx = { 4636 .kind = ei->kind, 4637 .exit_code = ei->exit_code, 4638 .reason = ei->reason, 4639 .at_ns = ktime_get_ns(), 4640 .at_jiffies = jiffies, 4641 }; 4642 struct seq_buf s; 4643 struct scx_event_stats events; 4644 unsigned long flags; 4645 char *buf; 4646 int cpu; 4647 4648 spin_lock_irqsave(&dump_lock, flags); 4649 4650 seq_buf_init(&s, ei->dump, dump_len); 4651 4652 if (ei->kind == SCX_EXIT_NONE) { 4653 dump_line(&s, "Debug dump triggered by %s", ei->reason); 4654 } else { 4655 dump_line(&s, "%s[%d] triggered exit kind %d:", 4656 current->comm, current->pid, ei->kind); 4657 dump_line(&s, " %s (%s)", ei->reason, ei->msg); 4658 dump_newline(&s); 4659 dump_line(&s, "Backtrace:"); 4660 dump_stack_trace(&s, " ", ei->bt, ei->bt_len); 4661 } 4662 4663 if (SCX_HAS_OP(sch, dump)) { 4664 ops_dump_init(&s, ""); 4665 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, dump, NULL, &dctx); 4666 ops_dump_exit(); 4667 } 4668 4669 dump_newline(&s); 4670 dump_line(&s, "CPU states"); 4671 dump_line(&s, "----------"); 4672 4673 for_each_possible_cpu(cpu) { 4674 struct rq *rq = cpu_rq(cpu); 4675 struct rq_flags rf; 4676 struct task_struct *p; 4677 struct seq_buf ns; 4678 size_t avail, used; 4679 bool idle; 4680 4681 rq_lock_irqsave(rq, &rf); 4682 4683 idle = list_empty(&rq->scx.runnable_list) && 4684 rq->curr->sched_class == 
&idle_sched_class; 4685 4686 if (idle && !SCX_HAS_OP(sch, dump_cpu)) 4687 goto next; 4688 4689 /* 4690 * We don't yet know whether ops.dump_cpu() will produce output 4691 * and we may want to skip the default CPU dump if it doesn't. 4692 * Use a nested seq_buf to generate the standard dump so that we 4693 * can decide whether to commit later. 4694 */ 4695 avail = seq_buf_get_buf(&s, &buf); 4696 seq_buf_init(&ns, buf, avail); 4697 4698 dump_newline(&ns); 4699 dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu ksync=%lu", 4700 cpu, rq->scx.nr_running, rq->scx.flags, 4701 rq->scx.cpu_released, rq->scx.ops_qseq, 4702 rq->scx.kick_sync); 4703 dump_line(&ns, " curr=%s[%d] class=%ps", 4704 rq->curr->comm, rq->curr->pid, 4705 rq->curr->sched_class); 4706 if (!cpumask_empty(rq->scx.cpus_to_kick)) 4707 dump_line(&ns, " cpus_to_kick : %*pb", 4708 cpumask_pr_args(rq->scx.cpus_to_kick)); 4709 if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle)) 4710 dump_line(&ns, " idle_to_kick : %*pb", 4711 cpumask_pr_args(rq->scx.cpus_to_kick_if_idle)); 4712 if (!cpumask_empty(rq->scx.cpus_to_preempt)) 4713 dump_line(&ns, " cpus_to_preempt: %*pb", 4714 cpumask_pr_args(rq->scx.cpus_to_preempt)); 4715 if (!cpumask_empty(rq->scx.cpus_to_wait)) 4716 dump_line(&ns, " cpus_to_wait : %*pb", 4717 cpumask_pr_args(rq->scx.cpus_to_wait)); 4718 4719 used = seq_buf_used(&ns); 4720 if (SCX_HAS_OP(sch, dump_cpu)) { 4721 ops_dump_init(&ns, " "); 4722 SCX_CALL_OP(sch, SCX_KF_REST, dump_cpu, NULL, 4723 &dctx, cpu, idle); 4724 ops_dump_exit(); 4725 } 4726 4727 /* 4728 * If idle && nothing generated by ops.dump_cpu(), there's 4729 * nothing interesting. Skip. 4730 */ 4731 if (idle && used == seq_buf_used(&ns)) 4732 goto next; 4733 4734 /* 4735 * $s may already have overflowed when $ns was created. If so, 4736 * calling commit on it will trigger BUG. 
4737 */ 4738 if (avail) { 4739 seq_buf_commit(&s, seq_buf_used(&ns)); 4740 if (seq_buf_has_overflowed(&ns)) 4741 seq_buf_set_overflow(&s); 4742 } 4743 4744 if (rq->curr->sched_class == &ext_sched_class) 4745 scx_dump_task(&s, &dctx, rq->curr, '*'); 4746 4747 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) 4748 scx_dump_task(&s, &dctx, p, ' '); 4749 next: 4750 rq_unlock_irqrestore(rq, &rf); 4751 } 4752 4753 dump_newline(&s); 4754 dump_line(&s, "Event counters"); 4755 dump_line(&s, "--------------"); 4756 4757 scx_read_events(sch, &events); 4758 scx_dump_event(s, &events, SCX_EV_SELECT_CPU_FALLBACK); 4759 scx_dump_event(s, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE); 4760 scx_dump_event(s, &events, SCX_EV_DISPATCH_KEEP_LAST); 4761 scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_EXITING); 4762 scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED); 4763 scx_dump_event(s, &events, SCX_EV_REFILL_SLICE_DFL); 4764 scx_dump_event(s, &events, SCX_EV_BYPASS_DURATION); 4765 scx_dump_event(s, &events, SCX_EV_BYPASS_DISPATCH); 4766 scx_dump_event(s, &events, SCX_EV_BYPASS_ACTIVATE); 4767 4768 if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker)) 4769 memcpy(ei->dump + dump_len - sizeof(trunc_marker), 4770 trunc_marker, sizeof(trunc_marker)); 4771 4772 spin_unlock_irqrestore(&dump_lock, flags); 4773 } 4774 4775 static void scx_error_irq_workfn(struct irq_work *irq_work) 4776 { 4777 struct scx_sched *sch = container_of(irq_work, struct scx_sched, error_irq_work); 4778 struct scx_exit_info *ei = sch->exit_info; 4779 4780 if (ei->kind >= SCX_EXIT_ERROR) 4781 scx_dump_state(ei, sch->ops.exit_dump_len); 4782 4783 kthread_queue_work(sch->helper, &sch->disable_work); 4784 } 4785 4786 static bool scx_vexit(struct scx_sched *sch, 4787 enum scx_exit_kind kind, s64 exit_code, 4788 const char *fmt, va_list args) 4789 { 4790 struct scx_exit_info *ei = sch->exit_info; 4791 4792 guard(preempt)(); 4793 4794 if (!scx_claim_exit(sch, kind)) 4795 return false; 4796 4797 ei->exit_code = exit_code; 4798 #ifdef CONFIG_STACKTRACE 4799 if (kind >= SCX_EXIT_ERROR) 4800 ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1); 4801 #endif 4802 vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args); 4803 4804 /* 4805 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again 4806 * in scx_disable_workfn(). 4807 */ 4808 ei->kind = kind; 4809 ei->reason = scx_exit_reason(ei->kind); 4810 4811 irq_work_queue(&sch->error_irq_work); 4812 return true; 4813 } 4814 4815 static int alloc_kick_syncs(void) 4816 { 4817 int cpu; 4818 4819 /* 4820 * Allocate per-CPU arrays sized by nr_cpu_ids. Use kvzalloc as size 4821 * can exceed percpu allocator limits on large machines. 
4822 */ 4823 for_each_possible_cpu(cpu) { 4824 struct scx_kick_syncs **ksyncs = per_cpu_ptr(&scx_kick_syncs, cpu); 4825 struct scx_kick_syncs *new_ksyncs; 4826 4827 WARN_ON_ONCE(rcu_access_pointer(*ksyncs)); 4828 4829 new_ksyncs = kvzalloc_node(struct_size(new_ksyncs, syncs, nr_cpu_ids), 4830 GFP_KERNEL, cpu_to_node(cpu)); 4831 if (!new_ksyncs) { 4832 free_kick_syncs(); 4833 return -ENOMEM; 4834 } 4835 4836 rcu_assign_pointer(*ksyncs, new_ksyncs); 4837 } 4838 4839 return 0; 4840 } 4841 4842 static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops) 4843 { 4844 struct scx_sched *sch; 4845 int node, ret; 4846 4847 sch = kzalloc(sizeof(*sch), GFP_KERNEL); 4848 if (!sch) 4849 return ERR_PTR(-ENOMEM); 4850 4851 sch->exit_info = alloc_exit_info(ops->exit_dump_len); 4852 if (!sch->exit_info) { 4853 ret = -ENOMEM; 4854 goto err_free_sch; 4855 } 4856 4857 ret = rhashtable_init(&sch->dsq_hash, &dsq_hash_params); 4858 if (ret < 0) 4859 goto err_free_ei; 4860 4861 sch->global_dsqs = kcalloc(nr_node_ids, sizeof(sch->global_dsqs[0]), 4862 GFP_KERNEL); 4863 if (!sch->global_dsqs) { 4864 ret = -ENOMEM; 4865 goto err_free_hash; 4866 } 4867 4868 for_each_node_state(node, N_POSSIBLE) { 4869 struct scx_dispatch_q *dsq; 4870 4871 dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node); 4872 if (!dsq) { 4873 ret = -ENOMEM; 4874 goto err_free_gdsqs; 4875 } 4876 4877 init_dsq(dsq, SCX_DSQ_GLOBAL); 4878 sch->global_dsqs[node] = dsq; 4879 } 4880 4881 sch->pcpu = alloc_percpu(struct scx_sched_pcpu); 4882 if (!sch->pcpu) { 4883 ret = -ENOMEM; 4884 goto err_free_gdsqs; 4885 } 4886 4887 sch->helper = kthread_run_worker(0, "sched_ext_helper"); 4888 if (IS_ERR(sch->helper)) { 4889 ret = PTR_ERR(sch->helper); 4890 goto err_free_pcpu; 4891 } 4892 4893 sched_set_fifo(sch->helper->task); 4894 4895 atomic_set(&sch->exit_kind, SCX_EXIT_NONE); 4896 init_irq_work(&sch->error_irq_work, scx_error_irq_workfn); 4897 kthread_init_work(&sch->disable_work, scx_disable_workfn); 4898 sch->ops = *ops; 4899 ops->priv = sch; 4900 4901 sch->kobj.kset = scx_kset; 4902 ret = kobject_init_and_add(&sch->kobj, &scx_ktype, NULL, "root"); 4903 if (ret < 0) 4904 goto err_stop_helper; 4905 4906 return sch; 4907 4908 err_stop_helper: 4909 kthread_destroy_worker(sch->helper); 4910 err_free_pcpu: 4911 free_percpu(sch->pcpu); 4912 err_free_gdsqs: 4913 for_each_node_state(node, N_POSSIBLE) 4914 kfree(sch->global_dsqs[node]); 4915 kfree(sch->global_dsqs); 4916 err_free_hash: 4917 rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL); 4918 err_free_ei: 4919 free_exit_info(sch->exit_info); 4920 err_free_sch: 4921 kfree(sch); 4922 return ERR_PTR(ret); 4923 } 4924 4925 static int check_hotplug_seq(struct scx_sched *sch, 4926 const struct sched_ext_ops *ops) 4927 { 4928 unsigned long long global_hotplug_seq; 4929 4930 /* 4931 * If a hotplug event has occurred between when a scheduler was 4932 * initialized, and when we were able to attach, exit and notify user 4933 * space about it. 
4934 */ 4935 if (ops->hotplug_seq) { 4936 global_hotplug_seq = atomic_long_read(&scx_hotplug_seq); 4937 if (ops->hotplug_seq != global_hotplug_seq) { 4938 scx_exit(sch, SCX_EXIT_UNREG_KERN, 4939 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG, 4940 "expected hotplug seq %llu did not match actual %llu", 4941 ops->hotplug_seq, global_hotplug_seq); 4942 return -EBUSY; 4943 } 4944 } 4945 4946 return 0; 4947 } 4948 4949 static int validate_ops(struct scx_sched *sch, const struct sched_ext_ops *ops) 4950 { 4951 /* 4952 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the 4953 * ops.enqueue() callback isn't implemented. 4954 */ 4955 if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) { 4956 scx_error(sch, "SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented"); 4957 return -EINVAL; 4958 } 4959 4960 /* 4961 * SCX_OPS_BUILTIN_IDLE_PER_NODE requires built-in CPU idle 4962 * selection policy to be enabled. 4963 */ 4964 if ((ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE) && 4965 (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))) { 4966 scx_error(sch, "SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled"); 4967 return -EINVAL; 4968 } 4969 4970 if (ops->flags & SCX_OPS_HAS_CGROUP_WEIGHT) 4971 pr_warn("SCX_OPS_HAS_CGROUP_WEIGHT is deprecated and a noop\n"); 4972 4973 if (ops->cpu_acquire || ops->cpu_release) 4974 pr_warn("ops->cpu_acquire/release() are deprecated, use sched_switch TP instead\n"); 4975 4976 return 0; 4977 } 4978 4979 static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link) 4980 { 4981 struct scx_sched *sch; 4982 struct scx_task_iter sti; 4983 struct task_struct *p; 4984 unsigned long timeout; 4985 int i, cpu, ret; 4986 4987 if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN), 4988 cpu_possible_mask)) { 4989 pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n"); 4990 return -EINVAL; 4991 } 4992 4993 mutex_lock(&scx_enable_mutex); 4994 4995 if (scx_enable_state() != SCX_DISABLED) { 4996 ret = -EBUSY; 4997 goto err_unlock; 4998 } 4999 5000 ret = alloc_kick_syncs(); 5001 if (ret) 5002 goto err_unlock; 5003 5004 sch = scx_alloc_and_add_sched(ops); 5005 if (IS_ERR(sch)) { 5006 ret = PTR_ERR(sch); 5007 goto err_free_ksyncs; 5008 } 5009 5010 /* 5011 * Transition to ENABLING and clear exit info to arm the disable path. 5012 * Failure triggers full disabling from here on. 5013 */ 5014 WARN_ON_ONCE(scx_set_enable_state(SCX_ENABLING) != SCX_DISABLED); 5015 WARN_ON_ONCE(scx_root); 5016 if (WARN_ON_ONCE(READ_ONCE(scx_aborting))) 5017 WRITE_ONCE(scx_aborting, false); 5018 5019 atomic_long_set(&scx_nr_rejected, 0); 5020 5021 for_each_possible_cpu(cpu) 5022 cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE; 5023 5024 /* 5025 * Keep CPUs stable during enable so that the BPF scheduler can track 5026 * online CPUs by watching ->on/offline_cpu() after ->init(). 5027 */ 5028 cpus_read_lock(); 5029 5030 /* 5031 * Make the scheduler instance visible. Must be inside cpus_read_lock(). 5032 * See handle_hotplug(). 
5033 */ 5034 rcu_assign_pointer(scx_root, sch); 5035 5036 scx_idle_enable(ops); 5037 5038 if (sch->ops.init) { 5039 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init, NULL); 5040 if (ret) { 5041 ret = ops_sanitize_err(sch, "init", ret); 5042 cpus_read_unlock(); 5043 scx_error(sch, "ops.init() failed (%d)", ret); 5044 goto err_disable; 5045 } 5046 sch->exit_info->flags |= SCX_EFLAG_INITIALIZED; 5047 } 5048 5049 for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++) 5050 if (((void (**)(void))ops)[i]) 5051 set_bit(i, sch->has_op); 5052 5053 ret = check_hotplug_seq(sch, ops); 5054 if (ret) { 5055 cpus_read_unlock(); 5056 goto err_disable; 5057 } 5058 scx_idle_update_selcpu_topology(ops); 5059 5060 cpus_read_unlock(); 5061 5062 ret = validate_ops(sch, ops); 5063 if (ret) 5064 goto err_disable; 5065 5066 WARN_ON_ONCE(scx_dsp_ctx); 5067 scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH; 5068 scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf, 5069 scx_dsp_max_batch), 5070 __alignof__(struct scx_dsp_ctx)); 5071 if (!scx_dsp_ctx) { 5072 ret = -ENOMEM; 5073 goto err_disable; 5074 } 5075 5076 if (ops->timeout_ms) 5077 timeout = msecs_to_jiffies(ops->timeout_ms); 5078 else 5079 timeout = SCX_WATCHDOG_MAX_TIMEOUT; 5080 5081 WRITE_ONCE(scx_watchdog_timeout, timeout); 5082 WRITE_ONCE(scx_watchdog_timestamp, jiffies); 5083 queue_delayed_work(system_unbound_wq, &scx_watchdog_work, 5084 READ_ONCE(scx_watchdog_timeout) / 2); 5085 5086 /* 5087 * Once __scx_enabled is set, %current can be switched to SCX anytime. 5088 * This can lead to stalls as some BPF schedulers (e.g. userspace 5089 * scheduling) may not function correctly before all tasks are switched. 5090 * Init in bypass mode to guarantee forward progress. 5091 */ 5092 scx_bypass(true); 5093 scx_bypassed_for_enable = true; 5094 5095 for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++) 5096 if (((void (**)(void))ops)[i]) 5097 set_bit(i, sch->has_op); 5098 5099 if (sch->ops.cpu_acquire || sch->ops.cpu_release) 5100 sch->ops.flags |= SCX_OPS_HAS_CPU_PREEMPT; 5101 5102 /* 5103 * Lock out forks, cgroup on/offlining and moves before opening the 5104 * floodgate so that they don't wander into the operations prematurely. 5105 */ 5106 percpu_down_write(&scx_fork_rwsem); 5107 5108 WARN_ON_ONCE(scx_init_task_enabled); 5109 scx_init_task_enabled = true; 5110 5111 /* 5112 * Enable ops for every task. Fork is excluded by scx_fork_rwsem 5113 * preventing new tasks from being added. No need to exclude tasks 5114 * leaving as sched_ext_free() can handle both prepped and enabled 5115 * tasks. Prep all tasks first and then enable them with preemption 5116 * disabled. 5117 * 5118 * All cgroups should be initialized before scx_init_task() so that the 5119 * BPF scheduler can reliably track each task's cgroup membership from 5120 * scx_init_task(). Lock out cgroup on/offlining and task migrations 5121 * while tasks are being initialized so that scx_cgroup_can_attach() 5122 * never sees uninitialized tasks. 5123 */ 5124 scx_cgroup_lock(); 5125 ret = scx_cgroup_init(sch); 5126 if (ret) 5127 goto err_disable_unlock_all; 5128 5129 scx_task_iter_start(&sti); 5130 while ((p = scx_task_iter_next_locked(&sti))) { 5131 /* 5132 * @p may already be dead, have lost all its usages counts and 5133 * be waiting for RCU grace period before being freed. @p can't 5134 * be initialized for SCX in such cases and should be ignored. 
5135 */ 5136 if (!tryget_task_struct(p)) 5137 continue; 5138 5139 scx_task_iter_unlock(&sti); 5140 5141 ret = scx_init_task(p, task_group(p), false); 5142 if (ret) { 5143 put_task_struct(p); 5144 scx_task_iter_stop(&sti); 5145 scx_error(sch, "ops.init_task() failed (%d) for %s[%d]", 5146 ret, p->comm, p->pid); 5147 goto err_disable_unlock_all; 5148 } 5149 5150 scx_set_task_state(p, SCX_TASK_READY); 5151 5152 put_task_struct(p); 5153 } 5154 scx_task_iter_stop(&sti); 5155 scx_cgroup_unlock(); 5156 percpu_up_write(&scx_fork_rwsem); 5157 5158 /* 5159 * All tasks are READY. It's safe to turn on scx_enabled() and switch 5160 * all eligible tasks. 5161 */ 5162 WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL)); 5163 static_branch_enable(&__scx_enabled); 5164 5165 /* 5166 * We're fully committed and can't fail. The task READY -> ENABLED 5167 * transitions here are synchronized against sched_ext_free() through 5168 * scx_tasks_lock. 5169 */ 5170 percpu_down_write(&scx_fork_rwsem); 5171 scx_task_iter_start(&sti); 5172 while ((p = scx_task_iter_next_locked(&sti))) { 5173 unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE; 5174 const struct sched_class *old_class = p->sched_class; 5175 const struct sched_class *new_class = scx_setscheduler_class(p); 5176 5177 if (scx_get_task_state(p) != SCX_TASK_READY) 5178 continue; 5179 5180 if (old_class != new_class) 5181 queue_flags |= DEQUEUE_CLASS; 5182 5183 scoped_guard (sched_change, p, queue_flags) { 5184 p->scx.slice = READ_ONCE(scx_slice_dfl); 5185 p->sched_class = new_class; 5186 } 5187 } 5188 scx_task_iter_stop(&sti); 5189 percpu_up_write(&scx_fork_rwsem); 5190 5191 scx_bypassed_for_enable = false; 5192 scx_bypass(false); 5193 5194 if (!scx_tryset_enable_state(SCX_ENABLED, SCX_ENABLING)) { 5195 WARN_ON_ONCE(atomic_read(&sch->exit_kind) == SCX_EXIT_NONE); 5196 goto err_disable; 5197 } 5198 5199 if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL)) 5200 static_branch_enable(&__scx_switched_all); 5201 5202 pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n", 5203 sch->ops.name, scx_switched_all() ? "" : " (partial)"); 5204 kobject_uevent(&sch->kobj, KOBJ_ADD); 5205 mutex_unlock(&scx_enable_mutex); 5206 5207 atomic_long_inc(&scx_enable_seq); 5208 5209 return 0; 5210 5211 err_free_ksyncs: 5212 free_kick_syncs(); 5213 err_unlock: 5214 mutex_unlock(&scx_enable_mutex); 5215 return ret; 5216 5217 err_disable_unlock_all: 5218 scx_cgroup_unlock(); 5219 percpu_up_write(&scx_fork_rwsem); 5220 /* we'll soon enter disable path, keep bypass on */ 5221 err_disable: 5222 mutex_unlock(&scx_enable_mutex); 5223 /* 5224 * Returning an error code here would not pass all the error information 5225 * to userspace. Record errno using scx_error() for cases scx_error() 5226 * wasn't already invoked and exit indicating success so that the error 5227 * is notified through ops.exit() with all the details. 5228 * 5229 * Flush scx_disable_work to ensure that error is reported before init 5230 * completion. sch's base reference will be put by bpf_scx_unreg(). 5231 */ 5232 scx_error(sch, "scx_enable() failed (%d)", ret); 5233 kthread_flush_work(&sch->disable_work); 5234 return 0; 5235 } 5236 5237 5238 /******************************************************************************** 5239 * bpf_struct_ops plumbing. 
5240 */ 5241 #include <linux/bpf_verifier.h> 5242 #include <linux/bpf.h> 5243 #include <linux/btf.h> 5244 5245 static const struct btf_type *task_struct_type; 5246 5247 static bool bpf_scx_is_valid_access(int off, int size, 5248 enum bpf_access_type type, 5249 const struct bpf_prog *prog, 5250 struct bpf_insn_access_aux *info) 5251 { 5252 if (type != BPF_READ) 5253 return false; 5254 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) 5255 return false; 5256 if (off % size != 0) 5257 return false; 5258 5259 return btf_ctx_access(off, size, type, prog, info); 5260 } 5261 5262 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log, 5263 const struct bpf_reg_state *reg, int off, 5264 int size) 5265 { 5266 const struct btf_type *t; 5267 5268 t = btf_type_by_id(reg->btf, reg->btf_id); 5269 if (t == task_struct_type) { 5270 if (off >= offsetof(struct task_struct, scx.slice) && 5271 off + size <= offsetofend(struct task_struct, scx.slice)) 5272 return SCALAR_VALUE; 5273 if (off >= offsetof(struct task_struct, scx.dsq_vtime) && 5274 off + size <= offsetofend(struct task_struct, scx.dsq_vtime)) 5275 return SCALAR_VALUE; 5276 if (off >= offsetof(struct task_struct, scx.disallow) && 5277 off + size <= offsetofend(struct task_struct, scx.disallow)) 5278 return SCALAR_VALUE; 5279 } 5280 5281 return -EACCES; 5282 } 5283 5284 static const struct bpf_verifier_ops bpf_scx_verifier_ops = { 5285 .get_func_proto = bpf_base_func_proto, 5286 .is_valid_access = bpf_scx_is_valid_access, 5287 .btf_struct_access = bpf_scx_btf_struct_access, 5288 }; 5289 5290 static int bpf_scx_init_member(const struct btf_type *t, 5291 const struct btf_member *member, 5292 void *kdata, const void *udata) 5293 { 5294 const struct sched_ext_ops *uops = udata; 5295 struct sched_ext_ops *ops = kdata; 5296 u32 moff = __btf_member_bit_offset(t, member) / 8; 5297 int ret; 5298 5299 switch (moff) { 5300 case offsetof(struct sched_ext_ops, dispatch_max_batch): 5301 if (*(u32 *)(udata + moff) > INT_MAX) 5302 return -E2BIG; 5303 ops->dispatch_max_batch = *(u32 *)(udata + moff); 5304 return 1; 5305 case offsetof(struct sched_ext_ops, flags): 5306 if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS) 5307 return -EINVAL; 5308 ops->flags = *(u64 *)(udata + moff); 5309 return 1; 5310 case offsetof(struct sched_ext_ops, name): 5311 ret = bpf_obj_name_cpy(ops->name, uops->name, 5312 sizeof(ops->name)); 5313 if (ret < 0) 5314 return ret; 5315 if (ret == 0) 5316 return -EINVAL; 5317 return 1; 5318 case offsetof(struct sched_ext_ops, timeout_ms): 5319 if (msecs_to_jiffies(*(u32 *)(udata + moff)) > 5320 SCX_WATCHDOG_MAX_TIMEOUT) 5321 return -E2BIG; 5322 ops->timeout_ms = *(u32 *)(udata + moff); 5323 return 1; 5324 case offsetof(struct sched_ext_ops, exit_dump_len): 5325 ops->exit_dump_len = 5326 *(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN; 5327 return 1; 5328 case offsetof(struct sched_ext_ops, hotplug_seq): 5329 ops->hotplug_seq = *(u64 *)(udata + moff); 5330 return 1; 5331 } 5332 5333 return 0; 5334 } 5335 5336 static int bpf_scx_check_member(const struct btf_type *t, 5337 const struct btf_member *member, 5338 const struct bpf_prog *prog) 5339 { 5340 u32 moff = __btf_member_bit_offset(t, member) / 8; 5341 5342 switch (moff) { 5343 case offsetof(struct sched_ext_ops, init_task): 5344 #ifdef CONFIG_EXT_GROUP_SCHED 5345 case offsetof(struct sched_ext_ops, cgroup_init): 5346 case offsetof(struct sched_ext_ops, cgroup_exit): 5347 case offsetof(struct sched_ext_ops, cgroup_prep_move): 5348 #endif 5349 case offsetof(struct sched_ext_ops, 
cpu_online): 5350 case offsetof(struct sched_ext_ops, cpu_offline): 5351 case offsetof(struct sched_ext_ops, init): 5352 case offsetof(struct sched_ext_ops, exit): 5353 break; 5354 default: 5355 if (prog->sleepable) 5356 return -EINVAL; 5357 } 5358 5359 return 0; 5360 } 5361 5362 static int bpf_scx_reg(void *kdata, struct bpf_link *link) 5363 { 5364 return scx_enable(kdata, link); 5365 } 5366 5367 static void bpf_scx_unreg(void *kdata, struct bpf_link *link) 5368 { 5369 struct sched_ext_ops *ops = kdata; 5370 struct scx_sched *sch = ops->priv; 5371 5372 scx_disable(SCX_EXIT_UNREG); 5373 kthread_flush_work(&sch->disable_work); 5374 kobject_put(&sch->kobj); 5375 } 5376 5377 static int bpf_scx_init(struct btf *btf) 5378 { 5379 task_struct_type = btf_type_by_id(btf, btf_tracing_ids[BTF_TRACING_TYPE_TASK]); 5380 5381 return 0; 5382 } 5383 5384 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link) 5385 { 5386 /* 5387 * sched_ext does not support updating the actively-loaded BPF 5388 * scheduler, as registering a BPF scheduler can always fail if the 5389 * scheduler returns an error code for e.g. ops.init(), ops.init_task(), 5390 * etc. Similarly, we can always race with unregistration happening 5391 * elsewhere, such as with sysrq. 5392 */ 5393 return -EOPNOTSUPP; 5394 } 5395 5396 static int bpf_scx_validate(void *kdata) 5397 { 5398 return 0; 5399 } 5400 5401 static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; } 5402 static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {} 5403 static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {} 5404 static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *prev__nullable) {} 5405 static void sched_ext_ops__tick(struct task_struct *p) {} 5406 static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {} 5407 static void sched_ext_ops__running(struct task_struct *p) {} 5408 static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {} 5409 static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {} 5410 static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to__nullable) { return false; } 5411 static bool sched_ext_ops__core_sched_before(struct task_struct *a, struct task_struct *b) { return false; } 5412 static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {} 5413 static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {} 5414 static void sched_ext_ops__update_idle(s32 cpu, bool idle) {} 5415 static void sched_ext_ops__cpu_acquire(s32 cpu, struct scx_cpu_acquire_args *args) {} 5416 static void sched_ext_ops__cpu_release(s32 cpu, struct scx_cpu_release_args *args) {} 5417 static s32 sched_ext_ops__init_task(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; } 5418 static void sched_ext_ops__exit_task(struct task_struct *p, struct scx_exit_task_args *args) {} 5419 static void sched_ext_ops__enable(struct task_struct *p) {} 5420 static void sched_ext_ops__disable(struct task_struct *p) {} 5421 #ifdef CONFIG_EXT_GROUP_SCHED 5422 static s32 sched_ext_ops__cgroup_init(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; } 5423 static void sched_ext_ops__cgroup_exit(struct cgroup *cgrp) {} 5424 static s32 sched_ext_ops__cgroup_prep_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; } 5425 static void sched_ext_ops__cgroup_move(struct 
task_struct *p, struct cgroup *from, struct cgroup *to) {} 5426 static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {} 5427 static void sched_ext_ops__cgroup_set_weight(struct cgroup *cgrp, u32 weight) {} 5428 static void sched_ext_ops__cgroup_set_bandwidth(struct cgroup *cgrp, u64 period_us, u64 quota_us, u64 burst_us) {} 5429 static void sched_ext_ops__cgroup_set_idle(struct cgroup *cgrp, bool idle) {} 5430 #endif 5431 static void sched_ext_ops__cpu_online(s32 cpu) {} 5432 static void sched_ext_ops__cpu_offline(s32 cpu) {} 5433 static s32 sched_ext_ops__init(void) { return -EINVAL; } 5434 static void sched_ext_ops__exit(struct scx_exit_info *info) {} 5435 static void sched_ext_ops__dump(struct scx_dump_ctx *ctx) {} 5436 static void sched_ext_ops__dump_cpu(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {} 5437 static void sched_ext_ops__dump_task(struct scx_dump_ctx *ctx, struct task_struct *p) {} 5438 5439 static struct sched_ext_ops __bpf_ops_sched_ext_ops = { 5440 .select_cpu = sched_ext_ops__select_cpu, 5441 .enqueue = sched_ext_ops__enqueue, 5442 .dequeue = sched_ext_ops__dequeue, 5443 .dispatch = sched_ext_ops__dispatch, 5444 .tick = sched_ext_ops__tick, 5445 .runnable = sched_ext_ops__runnable, 5446 .running = sched_ext_ops__running, 5447 .stopping = sched_ext_ops__stopping, 5448 .quiescent = sched_ext_ops__quiescent, 5449 .yield = sched_ext_ops__yield, 5450 .core_sched_before = sched_ext_ops__core_sched_before, 5451 .set_weight = sched_ext_ops__set_weight, 5452 .set_cpumask = sched_ext_ops__set_cpumask, 5453 .update_idle = sched_ext_ops__update_idle, 5454 .cpu_acquire = sched_ext_ops__cpu_acquire, 5455 .cpu_release = sched_ext_ops__cpu_release, 5456 .init_task = sched_ext_ops__init_task, 5457 .exit_task = sched_ext_ops__exit_task, 5458 .enable = sched_ext_ops__enable, 5459 .disable = sched_ext_ops__disable, 5460 #ifdef CONFIG_EXT_GROUP_SCHED 5461 .cgroup_init = sched_ext_ops__cgroup_init, 5462 .cgroup_exit = sched_ext_ops__cgroup_exit, 5463 .cgroup_prep_move = sched_ext_ops__cgroup_prep_move, 5464 .cgroup_move = sched_ext_ops__cgroup_move, 5465 .cgroup_cancel_move = sched_ext_ops__cgroup_cancel_move, 5466 .cgroup_set_weight = sched_ext_ops__cgroup_set_weight, 5467 .cgroup_set_bandwidth = sched_ext_ops__cgroup_set_bandwidth, 5468 .cgroup_set_idle = sched_ext_ops__cgroup_set_idle, 5469 #endif 5470 .cpu_online = sched_ext_ops__cpu_online, 5471 .cpu_offline = sched_ext_ops__cpu_offline, 5472 .init = sched_ext_ops__init, 5473 .exit = sched_ext_ops__exit, 5474 .dump = sched_ext_ops__dump, 5475 .dump_cpu = sched_ext_ops__dump_cpu, 5476 .dump_task = sched_ext_ops__dump_task, 5477 }; 5478 5479 static struct bpf_struct_ops bpf_sched_ext_ops = { 5480 .verifier_ops = &bpf_scx_verifier_ops, 5481 .reg = bpf_scx_reg, 5482 .unreg = bpf_scx_unreg, 5483 .check_member = bpf_scx_check_member, 5484 .init_member = bpf_scx_init_member, 5485 .init = bpf_scx_init, 5486 .update = bpf_scx_update, 5487 .validate = bpf_scx_validate, 5488 .name = "sched_ext_ops", 5489 .owner = THIS_MODULE, 5490 .cfi_stubs = &__bpf_ops_sched_ext_ops 5491 }; 5492 5493 5494 /******************************************************************************** 5495 * System integration and init. 
5496 */ 5497 5498 static void sysrq_handle_sched_ext_reset(u8 key) 5499 { 5500 scx_disable(SCX_EXIT_SYSRQ); 5501 } 5502 5503 static const struct sysrq_key_op sysrq_sched_ext_reset_op = { 5504 .handler = sysrq_handle_sched_ext_reset, 5505 .help_msg = "reset-sched-ext(S)", 5506 .action_msg = "Disable sched_ext and revert all tasks to CFS", 5507 .enable_mask = SYSRQ_ENABLE_RTNICE, 5508 }; 5509 5510 static void sysrq_handle_sched_ext_dump(u8 key) 5511 { 5512 struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" }; 5513 5514 if (scx_enabled()) 5515 scx_dump_state(&ei, 0); 5516 } 5517 5518 static const struct sysrq_key_op sysrq_sched_ext_dump_op = { 5519 .handler = sysrq_handle_sched_ext_dump, 5520 .help_msg = "dump-sched-ext(D)", 5521 .action_msg = "Trigger sched_ext debug dump", 5522 .enable_mask = SYSRQ_ENABLE_RTNICE, 5523 }; 5524 5525 static bool can_skip_idle_kick(struct rq *rq) 5526 { 5527 lockdep_assert_rq_held(rq); 5528 5529 /* 5530 * We can skip idle kicking if @rq is going to go through at least one 5531 * full SCX scheduling cycle before going idle. Just checking whether 5532 * curr is not idle is insufficient because we could be racing 5533 * balance_one() trying to pull the next task from a remote rq, which 5534 * may fail, and @rq may become idle afterwards. 5535 * 5536 * The race window is small and we don't and can't guarantee that @rq is 5537 * only kicked while idle anyway. Skip only when sure. 5538 */ 5539 return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE); 5540 } 5541 5542 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *ksyncs) 5543 { 5544 struct rq *rq = cpu_rq(cpu); 5545 struct scx_rq *this_scx = &this_rq->scx; 5546 const struct sched_class *cur_class; 5547 bool should_wait = false; 5548 unsigned long flags; 5549 5550 raw_spin_rq_lock_irqsave(rq, flags); 5551 cur_class = rq->curr->sched_class; 5552 5553 /* 5554 * During CPU hotplug, a CPU may depend on kicking itself to make 5555 * forward progress. Allow kicking self regardless of online state. If 5556 * @cpu is running a higher class task, we have no control over @cpu. 5557 * Skip kicking. 
5558 */ 5559 if ((cpu_online(cpu) || cpu == cpu_of(this_rq)) && 5560 !sched_class_above(cur_class, &ext_sched_class)) { 5561 if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) { 5562 if (cur_class == &ext_sched_class) 5563 rq->curr->scx.slice = 0; 5564 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt); 5565 } 5566 5567 if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) { 5568 if (cur_class == &ext_sched_class) { 5569 ksyncs[cpu] = rq->scx.kick_sync; 5570 should_wait = true; 5571 } else { 5572 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait); 5573 } 5574 } 5575 5576 resched_curr(rq); 5577 } else { 5578 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt); 5579 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait); 5580 } 5581 5582 raw_spin_rq_unlock_irqrestore(rq, flags); 5583 5584 return should_wait; 5585 } 5586 5587 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq) 5588 { 5589 struct rq *rq = cpu_rq(cpu); 5590 unsigned long flags; 5591 5592 raw_spin_rq_lock_irqsave(rq, flags); 5593 5594 if (!can_skip_idle_kick(rq) && 5595 (cpu_online(cpu) || cpu == cpu_of(this_rq))) 5596 resched_curr(rq); 5597 5598 raw_spin_rq_unlock_irqrestore(rq, flags); 5599 } 5600 5601 static void kick_cpus_irq_workfn(struct irq_work *irq_work) 5602 { 5603 struct rq *this_rq = this_rq(); 5604 struct scx_rq *this_scx = &this_rq->scx; 5605 struct scx_kick_syncs __rcu *ksyncs_pcpu = __this_cpu_read(scx_kick_syncs); 5606 bool should_wait = false; 5607 unsigned long *ksyncs; 5608 s32 cpu; 5609 5610 if (unlikely(!ksyncs_pcpu)) { 5611 pr_warn_once("kick_cpus_irq_workfn() called with NULL scx_kick_syncs"); 5612 return; 5613 } 5614 5615 ksyncs = rcu_dereference_bh(ksyncs_pcpu)->syncs; 5616 5617 for_each_cpu(cpu, this_scx->cpus_to_kick) { 5618 should_wait |= kick_one_cpu(cpu, this_rq, ksyncs); 5619 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick); 5620 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle); 5621 } 5622 5623 for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) { 5624 kick_one_cpu_if_idle(cpu, this_rq); 5625 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle); 5626 } 5627 5628 if (!should_wait) 5629 return; 5630 5631 for_each_cpu(cpu, this_scx->cpus_to_wait) { 5632 unsigned long *wait_kick_sync = &cpu_rq(cpu)->scx.kick_sync; 5633 5634 /* 5635 * Busy-wait until the task running at the time of kicking is no 5636 * longer running. This can be used to implement e.g. core 5637 * scheduling. 5638 * 5639 * smp_cond_load_acquire() pairs with store_releases in 5640 * pick_task_scx() and put_prev_task_scx(). The former breaks 5641 * the wait if SCX's scheduling path is entered even if the same 5642 * task is picked subsequently. The latter is necessary to break 5643 * the wait when $cpu is taken by a higher sched class. 5644 */ 5645 if (cpu != cpu_of(this_rq)) 5646 smp_cond_load_acquire(wait_kick_sync, VAL != ksyncs[cpu]); 5647 5648 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait); 5649 } 5650 } 5651 5652 /** 5653 * print_scx_info - print out sched_ext scheduler state 5654 * @log_lvl: the log level to use when printing 5655 * @p: target task 5656 * 5657 * If a sched_ext scheduler is enabled, print the name and state of the 5658 * scheduler. If @p is on sched_ext, print further information about the task. 5659 * 5660 * This function can be safely called on any task as long as the task_struct 5661 * itself is accessible. While safe, this function isn't synchronized and may 5662 * print out mixups or garbages of limited length. 
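 *
 * A typical debug-path invocation looks like the following (illustrative
 * only, not a call site in this file):
 *
 *     print_scx_info(KERN_ERR, p);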
5663 */ 5664 void print_scx_info(const char *log_lvl, struct task_struct *p) 5665 { 5666 struct scx_sched *sch = scx_root; 5667 enum scx_enable_state state = scx_enable_state(); 5668 const char *all = READ_ONCE(scx_switching_all) ? "+all" : ""; 5669 char runnable_at_buf[22] = "?"; 5670 struct sched_class *class; 5671 unsigned long runnable_at; 5672 5673 if (state == SCX_DISABLED) 5674 return; 5675 5676 /* 5677 * Carefully check if the task was running on sched_ext, and then 5678 * carefully copy the time it's been runnable, and its state. 5679 */ 5680 if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) || 5681 class != &ext_sched_class) { 5682 printk("%sSched_ext: %s (%s%s)", log_lvl, sch->ops.name, 5683 scx_enable_state_str[state], all); 5684 return; 5685 } 5686 5687 if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at, 5688 sizeof(runnable_at))) 5689 scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms", 5690 jiffies_delta_msecs(runnable_at, jiffies)); 5691 5692 /* print everything onto one line to conserve console space */ 5693 printk("%sSched_ext: %s (%s%s), task: runnable_at=%s", 5694 log_lvl, sch->ops.name, scx_enable_state_str[state], all, 5695 runnable_at_buf); 5696 } 5697 5698 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr) 5699 { 5700 /* 5701 * SCX schedulers often have userspace components which are sometimes 5702 * involved in critial scheduling paths. PM operations involve freezing 5703 * userspace which can lead to scheduling misbehaviors including stalls. 5704 * Let's bypass while PM operations are in progress. 5705 */ 5706 switch (event) { 5707 case PM_HIBERNATION_PREPARE: 5708 case PM_SUSPEND_PREPARE: 5709 case PM_RESTORE_PREPARE: 5710 scx_bypass(true); 5711 break; 5712 case PM_POST_HIBERNATION: 5713 case PM_POST_SUSPEND: 5714 case PM_POST_RESTORE: 5715 scx_bypass(false); 5716 break; 5717 } 5718 5719 return NOTIFY_OK; 5720 } 5721 5722 static struct notifier_block scx_pm_notifier = { 5723 .notifier_call = scx_pm_handler, 5724 }; 5725 5726 void __init init_sched_ext_class(void) 5727 { 5728 s32 cpu, v; 5729 5730 /* 5731 * The following is to prevent the compiler from optimizing out the enum 5732 * definitions so that BPF scheduler implementations can use them 5733 * through the generated vmlinux.h. 
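 *
 * On the BPF side these values are then available through the generated
 * vmlinux.h, e.g. an ops.enqueue() implementation may test
 * (enq_flags & SCX_ENQ_WAKEUP) or pass SCX_KICK_PREEMPT to
 * scx_bpf_kick_cpu(). The value written to @v below is never read.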
5734 */ 5735 WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT | 5736 SCX_TG_ONLINE); 5737 5738 scx_idle_init_masks(); 5739 5740 for_each_possible_cpu(cpu) { 5741 struct rq *rq = cpu_rq(cpu); 5742 int n = cpu_to_node(cpu); 5743 5744 init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL); 5745 init_dsq(&rq->scx.bypass_dsq, SCX_DSQ_BYPASS); 5746 INIT_LIST_HEAD(&rq->scx.runnable_list); 5747 INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals); 5748 5749 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick, GFP_KERNEL, n)); 5750 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n)); 5751 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n)); 5752 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n)); 5753 rq->scx.deferred_irq_work = IRQ_WORK_INIT_HARD(deferred_irq_workfn); 5754 rq->scx.kick_cpus_irq_work = IRQ_WORK_INIT_HARD(kick_cpus_irq_workfn); 5755 5756 if (cpu_online(cpu)) 5757 cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE; 5758 } 5759 5760 register_sysrq_key('S', &sysrq_sched_ext_reset_op); 5761 register_sysrq_key('D', &sysrq_sched_ext_dump_op); 5762 INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn); 5763 } 5764 5765 5766 /******************************************************************************** 5767 * Helpers that can be called from the BPF scheduler. 5768 */ 5769 static bool scx_dsq_insert_preamble(struct scx_sched *sch, struct task_struct *p, 5770 u64 enq_flags) 5771 { 5772 if (!scx_kf_allowed(sch, SCX_KF_ENQUEUE | SCX_KF_DISPATCH)) 5773 return false; 5774 5775 lockdep_assert_irqs_disabled(); 5776 5777 if (unlikely(!p)) { 5778 scx_error(sch, "called with NULL task"); 5779 return false; 5780 } 5781 5782 if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) { 5783 scx_error(sch, "invalid enq_flags 0x%llx", enq_flags); 5784 return false; 5785 } 5786 5787 return true; 5788 } 5789 5790 static void scx_dsq_insert_commit(struct scx_sched *sch, struct task_struct *p, 5791 u64 dsq_id, u64 enq_flags) 5792 { 5793 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 5794 struct task_struct *ddsp_task; 5795 5796 ddsp_task = __this_cpu_read(direct_dispatch_task); 5797 if (ddsp_task) { 5798 mark_direct_dispatch(sch, ddsp_task, p, dsq_id, enq_flags); 5799 return; 5800 } 5801 5802 if (unlikely(dspc->cursor >= scx_dsp_max_batch)) { 5803 scx_error(sch, "dispatch buffer overflow"); 5804 return; 5805 } 5806 5807 dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){ 5808 .task = p, 5809 .qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK, 5810 .dsq_id = dsq_id, 5811 .enq_flags = enq_flags, 5812 }; 5813 } 5814 5815 __bpf_kfunc_start_defs(); 5816 5817 /** 5818 * scx_bpf_dsq_insert - Insert a task into the FIFO queue of a DSQ 5819 * @p: task_struct to insert 5820 * @dsq_id: DSQ to insert into 5821 * @slice: duration @p can run for in nsecs, 0 to keep the current value 5822 * @enq_flags: SCX_ENQ_* 5823 * 5824 * Insert @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe to 5825 * call this function spuriously. Can be called from ops.enqueue(), 5826 * ops.select_cpu(), and ops.dispatch(). 5827 * 5828 * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch 5829 * and @p must match the task being enqueued. 5830 * 5831 * When called from ops.select_cpu(), @enq_flags and @dsp_id are stored, and @p 5832 * will be directly inserted into the corresponding dispatch queue after 5833 * ops.select_cpu() returns. 
If @p is inserted into SCX_DSQ_LOCAL, it will be 5834 * inserted into the local DSQ of the CPU returned by ops.select_cpu(). 5835 * @enq_flags are OR'd with the enqueue flags on the enqueue path before the 5836 * task is inserted. 5837 * 5838 * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id 5839 * and this function can be called upto ops.dispatch_max_batch times to insert 5840 * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of the 5841 * remaining slots. scx_bpf_dsq_move_to_local() flushes the batch and resets the 5842 * counter. 5843 * 5844 * This function doesn't have any locking restrictions and may be called under 5845 * BPF locks (in the future when BPF introduces more flexible locking). 5846 * 5847 * @p is allowed to run for @slice. The scheduling path is triggered on slice 5848 * exhaustion. If zero, the current residual slice is maintained. If 5849 * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with 5850 * scx_bpf_kick_cpu() to trigger scheduling. 5851 * 5852 * Returns %true on successful insertion, %false on failure. On the root 5853 * scheduler, %false return triggers scheduler abort and the caller doesn't need 5854 * to check the return value. 5855 */ 5856 __bpf_kfunc bool scx_bpf_dsq_insert___v2(struct task_struct *p, u64 dsq_id, 5857 u64 slice, u64 enq_flags) 5858 { 5859 struct scx_sched *sch; 5860 5861 guard(rcu)(); 5862 sch = rcu_dereference(scx_root); 5863 if (unlikely(!sch)) 5864 return false; 5865 5866 if (!scx_dsq_insert_preamble(sch, p, enq_flags)) 5867 return false; 5868 5869 if (slice) 5870 p->scx.slice = slice; 5871 else 5872 p->scx.slice = p->scx.slice ?: 1; 5873 5874 scx_dsq_insert_commit(sch, p, dsq_id, enq_flags); 5875 5876 return true; 5877 } 5878 5879 /* 5880 * COMPAT: Will be removed in v6.23 along with the ___v2 suffix. 5881 */ 5882 __bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, 5883 u64 slice, u64 enq_flags) 5884 { 5885 scx_bpf_dsq_insert___v2(p, dsq_id, slice, enq_flags); 5886 } 5887 5888 static bool scx_dsq_insert_vtime(struct scx_sched *sch, struct task_struct *p, 5889 u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) 5890 { 5891 if (!scx_dsq_insert_preamble(sch, p, enq_flags)) 5892 return false; 5893 5894 if (slice) 5895 p->scx.slice = slice; 5896 else 5897 p->scx.slice = p->scx.slice ?: 1; 5898 5899 p->scx.dsq_vtime = vtime; 5900 5901 scx_dsq_insert_commit(sch, p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); 5902 5903 return true; 5904 } 5905 5906 struct scx_bpf_dsq_insert_vtime_args { 5907 /* @p can't be packed together as KF_RCU is not transitive */ 5908 u64 dsq_id; 5909 u64 slice; 5910 u64 vtime; 5911 u64 enq_flags; 5912 }; 5913 5914 /** 5915 * __scx_bpf_dsq_insert_vtime - Arg-wrapped vtime DSQ insertion 5916 * @p: task_struct to insert 5917 * @args: struct containing the rest of the arguments 5918 * @args->dsq_id: DSQ to insert into 5919 * @args->slice: duration @p can run for in nsecs, 0 to keep the current value 5920 * @args->vtime: @p's ordering inside the vtime-sorted queue of the target DSQ 5921 * @args->enq_flags: SCX_ENQ_* 5922 * 5923 * Wrapper kfunc that takes arguments via struct to work around BPF's 5 argument 5924 * limit. BPF programs should use scx_bpf_dsq_insert_vtime() which is provided 5925 * as an inline wrapper in common.bpf.h. 5926 * 5927 * Insert @p into the vtime priority queue of the DSQ identified by 5928 * @args->dsq_id. Tasks queued into the priority queue are ordered by 5929 * @args->vtime. 
All other aspects are identical to scx_bpf_dsq_insert(). 5930 * 5931 * @args->vtime ordering is according to time_before64() which considers 5932 * wrapping. A numerically larger vtime may indicate an earlier position in the 5933 * ordering and vice-versa. 5934 * 5935 * A DSQ can only be used as a FIFO or priority queue at any given time and this 5936 * function must not be called on a DSQ which already has one or more FIFO tasks 5937 * queued and vice-versa. Also, the built-in DSQs (SCX_DSQ_LOCAL and 5938 * SCX_DSQ_GLOBAL) cannot be used as priority queues. 5939 * 5940 * Returns %true on successful insertion, %false on failure. On the root 5941 * scheduler, %false return triggers scheduler abort and the caller doesn't need 5942 * to check the return value. 5943 */ 5944 __bpf_kfunc bool 5945 __scx_bpf_dsq_insert_vtime(struct task_struct *p, 5946 struct scx_bpf_dsq_insert_vtime_args *args) 5947 { 5948 struct scx_sched *sch; 5949 5950 guard(rcu)(); 5951 5952 sch = rcu_dereference(scx_root); 5953 if (unlikely(!sch)) 5954 return false; 5955 5956 return scx_dsq_insert_vtime(sch, p, args->dsq_id, args->slice, 5957 args->vtime, args->enq_flags); 5958 } 5959 5960 /* 5961 * COMPAT: Will be removed in v6.23. 5962 */ 5963 __bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, 5964 u64 slice, u64 vtime, u64 enq_flags) 5965 { 5966 struct scx_sched *sch; 5967 5968 guard(rcu)(); 5969 5970 sch = rcu_dereference(scx_root); 5971 if (unlikely(!sch)) 5972 return; 5973 5974 scx_dsq_insert_vtime(sch, p, dsq_id, slice, vtime, enq_flags); 5975 } 5976 5977 __bpf_kfunc_end_defs(); 5978 5979 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch) 5980 BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU) 5981 BTF_ID_FLAGS(func, scx_bpf_dsq_insert___v2, KF_RCU) 5982 BTF_ID_FLAGS(func, __scx_bpf_dsq_insert_vtime, KF_RCU) 5983 BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU) 5984 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch) 5985 5986 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = { 5987 .owner = THIS_MODULE, 5988 .set = &scx_kfunc_ids_enqueue_dispatch, 5989 }; 5990 5991 static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit, 5992 struct task_struct *p, u64 dsq_id, u64 enq_flags) 5993 { 5994 struct scx_sched *sch = scx_root; 5995 struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq; 5996 struct rq *this_rq, *src_rq, *locked_rq; 5997 bool dispatched = false; 5998 bool in_balance; 5999 unsigned long flags; 6000 6001 if (!scx_kf_allowed_if_unlocked() && 6002 !scx_kf_allowed(sch, SCX_KF_DISPATCH)) 6003 return false; 6004 6005 /* 6006 * If the BPF scheduler keeps calling this function repeatedly, it can 6007 * cause similar live-lock conditions as consume_dispatch_q(). 6008 */ 6009 if (unlikely(READ_ONCE(scx_aborting))) 6010 return false; 6011 6012 /* 6013 * Can be called from either ops.dispatch() locking this_rq() or any 6014 * context where no rq lock is held. If latter, lock @p's task_rq which 6015 * we'll likely need anyway. 6016 */ 6017 src_rq = task_rq(p); 6018 6019 local_irq_save(flags); 6020 this_rq = this_rq(); 6021 in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE; 6022 6023 if (in_balance) { 6024 if (this_rq != src_rq) { 6025 raw_spin_rq_unlock(this_rq); 6026 raw_spin_rq_lock(src_rq); 6027 } 6028 } else { 6029 raw_spin_rq_lock(src_rq); 6030 } 6031 6032 locked_rq = src_rq; 6033 raw_spin_lock(&src_dsq->lock); 6034 6035 /* 6036 * Did someone else get to it? @p could have already left $src_dsq, got 6037 * re-enqueud, or be in the process of being consumed by someone else. 
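 *
 * The checks below catch each of these cases: @p no longer pointing at
 * $src_dsq, @p's dsq_seq having advanced past the iteration cursor (@p was
 * re-queued after the iteration started), or @p being claimed by a consuming
 * CPU (holding_cpu >= 0).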
6038 */ 6039 if (unlikely(p->scx.dsq != src_dsq || 6040 u32_before(kit->cursor.priv, p->scx.dsq_seq) || 6041 p->scx.holding_cpu >= 0) || 6042 WARN_ON_ONCE(src_rq != task_rq(p))) { 6043 raw_spin_unlock(&src_dsq->lock); 6044 goto out; 6045 } 6046 6047 /* @p is still on $src_dsq and stable, determine the destination */ 6048 dst_dsq = find_dsq_for_dispatch(sch, this_rq, dsq_id, p); 6049 6050 /* 6051 * Apply vtime and slice updates before moving so that the new time is 6052 * visible before inserting into $dst_dsq. @p is still on $src_dsq but 6053 * this is safe as we're locking it. 6054 */ 6055 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME) 6056 p->scx.dsq_vtime = kit->vtime; 6057 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE) 6058 p->scx.slice = kit->slice; 6059 6060 /* execute move */ 6061 locked_rq = move_task_between_dsqs(sch, p, enq_flags, src_dsq, dst_dsq); 6062 dispatched = true; 6063 out: 6064 if (in_balance) { 6065 if (this_rq != locked_rq) { 6066 raw_spin_rq_unlock(locked_rq); 6067 raw_spin_rq_lock(this_rq); 6068 } 6069 } else { 6070 raw_spin_rq_unlock_irqrestore(locked_rq, flags); 6071 } 6072 6073 kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE | 6074 __SCX_DSQ_ITER_HAS_VTIME); 6075 return dispatched; 6076 } 6077 6078 __bpf_kfunc_start_defs(); 6079 6080 /** 6081 * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots 6082 * 6083 * Can only be called from ops.dispatch(). 6084 */ 6085 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void) 6086 { 6087 struct scx_sched *sch; 6088 6089 guard(rcu)(); 6090 6091 sch = rcu_dereference(scx_root); 6092 if (unlikely(!sch)) 6093 return 0; 6094 6095 if (!scx_kf_allowed(sch, SCX_KF_DISPATCH)) 6096 return 0; 6097 6098 return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor); 6099 } 6100 6101 /** 6102 * scx_bpf_dispatch_cancel - Cancel the latest dispatch 6103 * 6104 * Cancel the latest dispatch. Can be called multiple times to cancel further 6105 * dispatches. Can only be called from ops.dispatch(). 6106 */ 6107 __bpf_kfunc void scx_bpf_dispatch_cancel(void) 6108 { 6109 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 6110 struct scx_sched *sch; 6111 6112 guard(rcu)(); 6113 6114 sch = rcu_dereference(scx_root); 6115 if (unlikely(!sch)) 6116 return; 6117 6118 if (!scx_kf_allowed(sch, SCX_KF_DISPATCH)) 6119 return; 6120 6121 if (dspc->cursor > 0) 6122 dspc->cursor--; 6123 else 6124 scx_error(sch, "dispatch buffer underflow"); 6125 } 6126 6127 /** 6128 * scx_bpf_dsq_move_to_local - move a task from a DSQ to the current CPU's local DSQ 6129 * @dsq_id: DSQ to move task from 6130 * 6131 * Move a task from the non-local DSQ identified by @dsq_id to the current CPU's 6132 * local DSQ for execution. Can only be called from ops.dispatch(). 6133 * 6134 * This function flushes the in-flight dispatches from scx_bpf_dsq_insert() 6135 * before trying to move from the specified DSQ. It may also grab rq locks and 6136 * thus can't be called under any BPF locks. 6137 * 6138 * Returns %true if a task has been moved, %false if there isn't any task to 6139 * move. 
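 *
 * A minimal ops.dispatch() sketch, assuming the BPF_STRUCT_OPS() helper from
 * the SCX BPF-side headers and an illustrative user DSQ id MY_DSQ (neither is
 * defined in this file):
 *
 *     void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *     {
 *             scx_bpf_dsq_move_to_local(MY_DSQ);
 *     }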
6140 */ 6141 __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id) 6142 { 6143 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 6144 struct scx_dispatch_q *dsq; 6145 struct scx_sched *sch; 6146 6147 guard(rcu)(); 6148 6149 sch = rcu_dereference(scx_root); 6150 if (unlikely(!sch)) 6151 return false; 6152 6153 if (!scx_kf_allowed(sch, SCX_KF_DISPATCH)) 6154 return false; 6155 6156 flush_dispatch_buf(sch, dspc->rq); 6157 6158 dsq = find_user_dsq(sch, dsq_id); 6159 if (unlikely(!dsq)) { 6160 scx_error(sch, "invalid DSQ ID 0x%016llx", dsq_id); 6161 return false; 6162 } 6163 6164 if (consume_dispatch_q(sch, dspc->rq, dsq)) { 6165 /* 6166 * A successfully consumed task can be dequeued before it starts 6167 * running while the CPU is trying to migrate other dispatched 6168 * tasks. Bump nr_tasks to tell balance_one() to retry on empty 6169 * local DSQ. 6170 */ 6171 dspc->nr_tasks++; 6172 return true; 6173 } else { 6174 return false; 6175 } 6176 } 6177 6178 /** 6179 * scx_bpf_dsq_move_set_slice - Override slice when moving between DSQs 6180 * @it__iter: DSQ iterator in progress 6181 * @slice: duration the moved task can run for in nsecs 6182 * 6183 * Override the slice of the next task that will be moved from @it__iter using 6184 * scx_bpf_dsq_move[_vtime](). If this function is not called, the previous 6185 * slice duration is kept. 6186 */ 6187 __bpf_kfunc void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter, 6188 u64 slice) 6189 { 6190 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter; 6191 6192 kit->slice = slice; 6193 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE; 6194 } 6195 6196 /** 6197 * scx_bpf_dsq_move_set_vtime - Override vtime when moving between DSQs 6198 * @it__iter: DSQ iterator in progress 6199 * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ 6200 * 6201 * Override the vtime of the next task that will be moved from @it__iter using 6202 * scx_bpf_dsq_move_vtime(). If this function is not called, the previous slice 6203 * vtime is kept. If scx_bpf_dsq_move() is used to dispatch the next task, the 6204 * override is ignored and cleared. 6205 */ 6206 __bpf_kfunc void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter, 6207 u64 vtime) 6208 { 6209 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter; 6210 6211 kit->vtime = vtime; 6212 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME; 6213 } 6214 6215 /** 6216 * scx_bpf_dsq_move - Move a task from DSQ iteration to a DSQ 6217 * @it__iter: DSQ iterator in progress 6218 * @p: task to transfer 6219 * @dsq_id: DSQ to move @p to 6220 * @enq_flags: SCX_ENQ_* 6221 * 6222 * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ 6223 * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can 6224 * be the destination. 6225 * 6226 * For the transfer to be successful, @p must still be on the DSQ and have been 6227 * queued before the DSQ iteration started. This function doesn't care whether 6228 * @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have 6229 * been queued before the iteration started. 6230 * 6231 * @p's slice is kept by default. Use scx_bpf_dsq_move_set_slice() to update. 6232 * 6233 * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq 6234 * lock (e.g. BPF timers or SYSCALL programs). 6235 * 6236 * Returns %true if @p has been consumed, %false if @p had already been 6237 * consumed, dequeued, or, for sub-scheds, @dsq_id points to a disallowed local 6238 * DSQ. 
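 *
 * Illustrative pairing with the DSQ iterator, assuming the bpf_for_each() and
 * BPF_FOR_EACH_ITER helpers from the SCX BPF-side headers plus a hypothetical
 * user DSQ MY_DSQ and predicate wants_this_cpu():
 *
 *     struct task_struct *p;
 *
 *     bpf_for_each(scx_dsq, p, MY_DSQ, 0) {
 *             if (wants_this_cpu(p) &&
 *                 scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p, SCX_DSQ_LOCAL, 0))
 *                     break;
 *     }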
6239 */ 6240 __bpf_kfunc bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter, 6241 struct task_struct *p, u64 dsq_id, 6242 u64 enq_flags) 6243 { 6244 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter, 6245 p, dsq_id, enq_flags); 6246 } 6247 6248 /** 6249 * scx_bpf_dsq_move_vtime - Move a task from DSQ iteration to a PRIQ DSQ 6250 * @it__iter: DSQ iterator in progress 6251 * @p: task to transfer 6252 * @dsq_id: DSQ to move @p to 6253 * @enq_flags: SCX_ENQ_* 6254 * 6255 * Transfer @p which is on the DSQ currently iterated by @it__iter to the 6256 * priority queue of the DSQ specified by @dsq_id. The destination must be a 6257 * user DSQ as only user DSQs support priority queue. 6258 * 6259 * @p's slice and vtime are kept by default. Use scx_bpf_dsq_move_set_slice() 6260 * and scx_bpf_dsq_move_set_vtime() to update. 6261 * 6262 * All other aspects are identical to scx_bpf_dsq_move(). See 6263 * scx_bpf_dsq_insert_vtime() for more information on @vtime. 6264 */ 6265 __bpf_kfunc bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter, 6266 struct task_struct *p, u64 dsq_id, 6267 u64 enq_flags) 6268 { 6269 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter, 6270 p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); 6271 } 6272 6273 __bpf_kfunc_end_defs(); 6274 6275 BTF_KFUNCS_START(scx_kfunc_ids_dispatch) 6276 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots) 6277 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel) 6278 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local) 6279 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU) 6280 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU) 6281 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU) 6282 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU) 6283 BTF_KFUNCS_END(scx_kfunc_ids_dispatch) 6284 6285 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = { 6286 .owner = THIS_MODULE, 6287 .set = &scx_kfunc_ids_dispatch, 6288 }; 6289 6290 static u32 reenq_local(struct rq *rq) 6291 { 6292 LIST_HEAD(tasks); 6293 u32 nr_enqueued = 0; 6294 struct task_struct *p, *n; 6295 6296 lockdep_assert_rq_held(rq); 6297 6298 /* 6299 * The BPF scheduler may choose to dispatch tasks back to 6300 * @rq->scx.local_dsq. Move all candidate tasks off to a private list 6301 * first to avoid processing the same tasks repeatedly. 6302 */ 6303 list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list, 6304 scx.dsq_list.node) { 6305 /* 6306 * If @p is being migrated, @p's current CPU may not agree with 6307 * its allowed CPUs and the migration_cpu_stop is about to 6308 * deactivate and re-activate @p anyway. Skip re-enqueueing. 6309 * 6310 * While racing sched property changes may also dequeue and 6311 * re-enqueue a migrating task while its current CPU and allowed 6312 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to 6313 * the current local DSQ for running tasks and thus are not 6314 * visible to the BPF scheduler. 6315 */ 6316 if (p->migration_pending) 6317 continue; 6318 6319 dispatch_dequeue(rq, p); 6320 list_add_tail(&p->scx.dsq_list.node, &tasks); 6321 } 6322 6323 list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) { 6324 list_del_init(&p->scx.dsq_list.node); 6325 do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1); 6326 nr_enqueued++; 6327 } 6328 6329 return nr_enqueued; 6330 } 6331 6332 __bpf_kfunc_start_defs(); 6333 6334 /** 6335 * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ 6336 * 6337 * Iterate over all of the tasks currently enqueued on the local DSQ of the 6338 * caller's CPU, and re-enqueue them in the BPF scheduler. 
Returns the number of 6339 * processed tasks. Can only be called from ops.cpu_release(). 6340 * 6341 * COMPAT: Will be removed in v6.23 along with the ___v2 suffix on the void 6342 * returning variant that can be called from anywhere. 6343 */ 6344 __bpf_kfunc u32 scx_bpf_reenqueue_local(void) 6345 { 6346 struct scx_sched *sch; 6347 struct rq *rq; 6348 6349 guard(rcu)(); 6350 sch = rcu_dereference(scx_root); 6351 if (unlikely(!sch)) 6352 return 0; 6353 6354 if (!scx_kf_allowed(sch, SCX_KF_CPU_RELEASE)) 6355 return 0; 6356 6357 rq = cpu_rq(smp_processor_id()); 6358 lockdep_assert_rq_held(rq); 6359 6360 return reenq_local(rq); 6361 } 6362 6363 __bpf_kfunc_end_defs(); 6364 6365 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release) 6366 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local) 6367 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release) 6368 6369 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = { 6370 .owner = THIS_MODULE, 6371 .set = &scx_kfunc_ids_cpu_release, 6372 }; 6373 6374 __bpf_kfunc_start_defs(); 6375 6376 /** 6377 * scx_bpf_create_dsq - Create a custom DSQ 6378 * @dsq_id: DSQ to create 6379 * @node: NUMA node to allocate from 6380 * 6381 * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable 6382 * scx callback, and any BPF_PROG_TYPE_SYSCALL prog. 6383 */ 6384 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) 6385 { 6386 struct scx_dispatch_q *dsq; 6387 struct scx_sched *sch; 6388 s32 ret; 6389 6390 if (unlikely(node >= (int)nr_node_ids || 6391 (node < 0 && node != NUMA_NO_NODE))) 6392 return -EINVAL; 6393 6394 if (unlikely(dsq_id & SCX_DSQ_FLAG_BUILTIN)) 6395 return -EINVAL; 6396 6397 dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node); 6398 if (!dsq) 6399 return -ENOMEM; 6400 6401 init_dsq(dsq, dsq_id); 6402 6403 rcu_read_lock(); 6404 6405 sch = rcu_dereference(scx_root); 6406 if (sch) 6407 ret = rhashtable_lookup_insert_fast(&sch->dsq_hash, &dsq->hash_node, 6408 dsq_hash_params); 6409 else 6410 ret = -ENODEV; 6411 6412 rcu_read_unlock(); 6413 if (ret) 6414 kfree(dsq); 6415 return ret; 6416 } 6417 6418 __bpf_kfunc_end_defs(); 6419 6420 BTF_KFUNCS_START(scx_kfunc_ids_unlocked) 6421 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE) 6422 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU) 6423 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU) 6424 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU) 6425 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU) 6426 BTF_KFUNCS_END(scx_kfunc_ids_unlocked) 6427 6428 static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = { 6429 .owner = THIS_MODULE, 6430 .set = &scx_kfunc_ids_unlocked, 6431 }; 6432 6433 __bpf_kfunc_start_defs(); 6434 6435 /** 6436 * scx_bpf_task_set_slice - Set task's time slice 6437 * @p: task of interest 6438 * @slice: time slice to set in nsecs 6439 * 6440 * Set @p's time slice to @slice. Returns %true on success, %false if the 6441 * calling scheduler doesn't have authority over @p. 6442 */ 6443 __bpf_kfunc bool scx_bpf_task_set_slice(struct task_struct *p, u64 slice) 6444 { 6445 p->scx.slice = slice; 6446 return true; 6447 } 6448 6449 /** 6450 * scx_bpf_task_set_dsq_vtime - Set task's virtual time for DSQ ordering 6451 * @p: task of interest 6452 * @vtime: virtual time to set 6453 * 6454 * Set @p's virtual time to @vtime. Returns %true on success, %false if the 6455 * calling scheduler doesn't have authority over @p. 
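 *
 * For example, a vtime-based scheduler may charge the slice consumed by @p on
 * ops.stopping(), weight-scaled in the way common example schedulers do
 * (sketch only, not code from this file):
 *
 *     scx_bpf_task_set_dsq_vtime(p, p->scx.dsq_vtime +
 *             (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight);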
6456 */ 6457 __bpf_kfunc bool scx_bpf_task_set_dsq_vtime(struct task_struct *p, u64 vtime) 6458 { 6459 p->scx.dsq_vtime = vtime; 6460 return true; 6461 } 6462 6463 static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags) 6464 { 6465 struct rq *this_rq; 6466 unsigned long irq_flags; 6467 6468 if (!ops_cpu_valid(sch, cpu, NULL)) 6469 return; 6470 6471 local_irq_save(irq_flags); 6472 6473 this_rq = this_rq(); 6474 6475 /* 6476 * While bypassing for PM ops, IRQ handling may not be online which can 6477 * lead to irq_work_queue() malfunction such as infinite busy wait for 6478 * IRQ status update. Suppress kicking. 6479 */ 6480 if (scx_rq_bypassing(this_rq)) 6481 goto out; 6482 6483 /* 6484 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting 6485 * rq locks. We can probably be smarter and avoid bouncing if called 6486 * from ops which don't hold a rq lock. 6487 */ 6488 if (flags & SCX_KICK_IDLE) { 6489 struct rq *target_rq = cpu_rq(cpu); 6490 6491 if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT))) 6492 scx_error(sch, "PREEMPT/WAIT cannot be used with SCX_KICK_IDLE"); 6493 6494 if (raw_spin_rq_trylock(target_rq)) { 6495 if (can_skip_idle_kick(target_rq)) { 6496 raw_spin_rq_unlock(target_rq); 6497 goto out; 6498 } 6499 raw_spin_rq_unlock(target_rq); 6500 } 6501 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle); 6502 } else { 6503 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick); 6504 6505 if (flags & SCX_KICK_PREEMPT) 6506 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt); 6507 if (flags & SCX_KICK_WAIT) 6508 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait); 6509 } 6510 6511 irq_work_queue(&this_rq->scx.kick_cpus_irq_work); 6512 out: 6513 local_irq_restore(irq_flags); 6514 } 6515 6516 /** 6517 * scx_bpf_kick_cpu - Trigger reschedule on a CPU 6518 * @cpu: cpu to kick 6519 * @flags: %SCX_KICK_* flags 6520 * 6521 * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or 6522 * trigger rescheduling on a busy CPU. This can be called from any online 6523 * scx_ops operation and the actual kicking is performed asynchronously through 6524 * an irq work. 6525 */ 6526 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags) 6527 { 6528 struct scx_sched *sch; 6529 6530 guard(rcu)(); 6531 sch = rcu_dereference(scx_root); 6532 if (likely(sch)) 6533 scx_kick_cpu(sch, cpu, flags); 6534 } 6535 6536 /** 6537 * scx_bpf_dsq_nr_queued - Return the number of queued tasks 6538 * @dsq_id: id of the DSQ 6539 * 6540 * Return the number of tasks in the DSQ matching @dsq_id. If not found, 6541 * -%ENOENT is returned. 
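 *
 * Example sketch - kick an idle CPU only when work is actually queued on a
 * hypothetical user DSQ MY_DSQ:
 *
 *     if (scx_bpf_dsq_nr_queued(MY_DSQ) > 0)
 *             scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);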
6542 */ 6543 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id) 6544 { 6545 struct scx_sched *sch; 6546 struct scx_dispatch_q *dsq; 6547 s32 ret; 6548 6549 preempt_disable(); 6550 6551 sch = rcu_dereference_sched(scx_root); 6552 if (unlikely(!sch)) { 6553 ret = -ENODEV; 6554 goto out; 6555 } 6556 6557 if (dsq_id == SCX_DSQ_LOCAL) { 6558 ret = READ_ONCE(this_rq()->scx.local_dsq.nr); 6559 goto out; 6560 } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) { 6561 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK; 6562 6563 if (ops_cpu_valid(sch, cpu, NULL)) { 6564 ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr); 6565 goto out; 6566 } 6567 } else { 6568 dsq = find_user_dsq(sch, dsq_id); 6569 if (dsq) { 6570 ret = READ_ONCE(dsq->nr); 6571 goto out; 6572 } 6573 } 6574 ret = -ENOENT; 6575 out: 6576 preempt_enable(); 6577 return ret; 6578 } 6579 6580 /** 6581 * scx_bpf_destroy_dsq - Destroy a custom DSQ 6582 * @dsq_id: DSQ to destroy 6583 * 6584 * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with 6585 * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is 6586 * empty and no further tasks are dispatched to it. Ignored if called on a DSQ 6587 * which doesn't exist. Can be called from any online scx_ops operations. 6588 */ 6589 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id) 6590 { 6591 struct scx_sched *sch; 6592 6593 rcu_read_lock(); 6594 sch = rcu_dereference(scx_root); 6595 if (sch) 6596 destroy_dsq(sch, dsq_id); 6597 rcu_read_unlock(); 6598 } 6599 6600 /** 6601 * bpf_iter_scx_dsq_new - Create a DSQ iterator 6602 * @it: iterator to initialize 6603 * @dsq_id: DSQ to iterate 6604 * @flags: %SCX_DSQ_ITER_* 6605 * 6606 * Initialize BPF iterator @it which can be used with bpf_for_each() to walk 6607 * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes 6608 * tasks which are already queued when this function is invoked. 6609 */ 6610 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, 6611 u64 flags) 6612 { 6613 struct bpf_iter_scx_dsq_kern *kit = (void *)it; 6614 struct scx_sched *sch; 6615 6616 BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) > 6617 sizeof(struct bpf_iter_scx_dsq)); 6618 BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) != 6619 __alignof__(struct bpf_iter_scx_dsq)); 6620 BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS & 6621 ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1)); 6622 6623 /* 6624 * next() and destroy() will be called regardless of the return value. 6625 * Always clear $kit->dsq. 6626 */ 6627 kit->dsq = NULL; 6628 6629 sch = rcu_dereference_check(scx_root, rcu_read_lock_bh_held()); 6630 if (unlikely(!sch)) 6631 return -ENODEV; 6632 6633 if (flags & ~__SCX_DSQ_ITER_USER_FLAGS) 6634 return -EINVAL; 6635 6636 kit->dsq = find_user_dsq(sch, dsq_id); 6637 if (!kit->dsq) 6638 return -ENOENT; 6639 6640 kit->cursor = INIT_DSQ_LIST_CURSOR(kit->cursor, flags, 6641 READ_ONCE(kit->dsq->seq)); 6642 6643 return 0; 6644 } 6645 6646 /** 6647 * bpf_iter_scx_dsq_next - Progress a DSQ iterator 6648 * @it: iterator to progress 6649 * 6650 * Return the next task. See bpf_iter_scx_dsq_new(). 
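 *
 * Open-coded iteration sketch using the new/next/destroy triplet directly
 * (BPF programs more commonly go through the bpf_for_each() wrapper; MY_DSQ
 * and inspect() are illustrative). next() and destroy() are safe to call even
 * if new() failed:
 *
 *     struct bpf_iter_scx_dsq it;
 *     struct task_struct *p;
 *
 *     bpf_iter_scx_dsq_new(&it, MY_DSQ, 0);
 *     while ((p = bpf_iter_scx_dsq_next(&it)))
 *             inspect(p);
 *     bpf_iter_scx_dsq_destroy(&it);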
6651 */ 6652 __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it) 6653 { 6654 struct bpf_iter_scx_dsq_kern *kit = (void *)it; 6655 bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV; 6656 struct task_struct *p; 6657 unsigned long flags; 6658 6659 if (!kit->dsq) 6660 return NULL; 6661 6662 raw_spin_lock_irqsave(&kit->dsq->lock, flags); 6663 6664 if (list_empty(&kit->cursor.node)) 6665 p = NULL; 6666 else 6667 p = container_of(&kit->cursor, struct task_struct, scx.dsq_list); 6668 6669 /* 6670 * Only tasks which were queued before the iteration started are 6671 * visible. This bounds BPF iterations and guarantees that vtime never 6672 * jumps in the other direction while iterating. 6673 */ 6674 do { 6675 p = nldsq_next_task(kit->dsq, p, rev); 6676 } while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq))); 6677 6678 if (p) { 6679 if (rev) 6680 list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node); 6681 else 6682 list_move(&kit->cursor.node, &p->scx.dsq_list.node); 6683 } else { 6684 list_del_init(&kit->cursor.node); 6685 } 6686 6687 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags); 6688 6689 return p; 6690 } 6691 6692 /** 6693 * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator 6694 * @it: iterator to destroy 6695 * 6696 * Undo scx_iter_scx_dsq_new(). 6697 */ 6698 __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it) 6699 { 6700 struct bpf_iter_scx_dsq_kern *kit = (void *)it; 6701 6702 if (!kit->dsq) 6703 return; 6704 6705 if (!list_empty(&kit->cursor.node)) { 6706 unsigned long flags; 6707 6708 raw_spin_lock_irqsave(&kit->dsq->lock, flags); 6709 list_del_init(&kit->cursor.node); 6710 raw_spin_unlock_irqrestore(&kit->dsq->lock, flags); 6711 } 6712 kit->dsq = NULL; 6713 } 6714 6715 /** 6716 * scx_bpf_dsq_peek - Lockless peek at the first element. 6717 * @dsq_id: DSQ to examine. 6718 * 6719 * Read the first element in the DSQ. This is semantically equivalent to using 6720 * the DSQ iterator, but is lockfree. Of course, like any lockless operation, 6721 * this provides only a point-in-time snapshot, and the contents may change 6722 * by the time any subsequent locking operation reads the queue. 6723 * 6724 * Returns the pointer, or NULL indicates an empty queue OR internal error. 
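 *
 * Example sketch - cheaply check the head of a hypothetical user DSQ before
 * deciding whether to act on it (curr_weight is illustrative):
 *
 *     p = scx_bpf_dsq_peek(MY_DSQ);
 *     if (p && p->scx.weight > curr_weight)
 *             scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT);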
6725 */ 6726 __bpf_kfunc struct task_struct *scx_bpf_dsq_peek(u64 dsq_id) 6727 { 6728 struct scx_sched *sch; 6729 struct scx_dispatch_q *dsq; 6730 6731 sch = rcu_dereference(scx_root); 6732 if (unlikely(!sch)) 6733 return NULL; 6734 6735 if (unlikely(dsq_id & SCX_DSQ_FLAG_BUILTIN)) { 6736 scx_error(sch, "peek disallowed on builtin DSQ 0x%llx", dsq_id); 6737 return NULL; 6738 } 6739 6740 dsq = find_user_dsq(sch, dsq_id); 6741 if (unlikely(!dsq)) { 6742 scx_error(sch, "peek on non-existent DSQ 0x%llx", dsq_id); 6743 return NULL; 6744 } 6745 6746 return rcu_dereference(dsq->first_task); 6747 } 6748 6749 __bpf_kfunc_end_defs(); 6750 6751 static s32 __bstr_format(struct scx_sched *sch, u64 *data_buf, char *line_buf, 6752 size_t line_size, char *fmt, unsigned long long *data, 6753 u32 data__sz) 6754 { 6755 struct bpf_bprintf_data bprintf_data = { .get_bin_args = true }; 6756 s32 ret; 6757 6758 if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 || 6759 (data__sz && !data)) { 6760 scx_error(sch, "invalid data=%p and data__sz=%u", (void *)data, data__sz); 6761 return -EINVAL; 6762 } 6763 6764 ret = copy_from_kernel_nofault(data_buf, data, data__sz); 6765 if (ret < 0) { 6766 scx_error(sch, "failed to read data fields (%d)", ret); 6767 return ret; 6768 } 6769 6770 ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8, 6771 &bprintf_data); 6772 if (ret < 0) { 6773 scx_error(sch, "format preparation failed (%d)", ret); 6774 return ret; 6775 } 6776 6777 ret = bstr_printf(line_buf, line_size, fmt, 6778 bprintf_data.bin_args); 6779 bpf_bprintf_cleanup(&bprintf_data); 6780 if (ret < 0) { 6781 scx_error(sch, "(\"%s\", %p, %u) failed to format", fmt, data, data__sz); 6782 return ret; 6783 } 6784 6785 return ret; 6786 } 6787 6788 static s32 bstr_format(struct scx_sched *sch, struct scx_bstr_buf *buf, 6789 char *fmt, unsigned long long *data, u32 data__sz) 6790 { 6791 return __bstr_format(sch, buf->data, buf->line, sizeof(buf->line), 6792 fmt, data, data__sz); 6793 } 6794 6795 __bpf_kfunc_start_defs(); 6796 6797 /** 6798 * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler. 6799 * @exit_code: Exit value to pass to user space via struct scx_exit_info. 6800 * @fmt: error message format string 6801 * @data: format string parameters packaged using ___bpf_fill() macro 6802 * @data__sz: @data len, must end in '__sz' for the verifier 6803 * 6804 * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops 6805 * disabling. 6806 */ 6807 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt, 6808 unsigned long long *data, u32 data__sz) 6809 { 6810 struct scx_sched *sch; 6811 unsigned long flags; 6812 6813 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags); 6814 sch = rcu_dereference_bh(scx_root); 6815 if (likely(sch) && 6816 bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0) 6817 scx_exit(sch, SCX_EXIT_UNREG_BPF, exit_code, "%s", scx_exit_bstr_buf.line); 6818 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags); 6819 } 6820 6821 /** 6822 * scx_bpf_error_bstr - Indicate fatal error 6823 * @fmt: error message format string 6824 * @data: format string parameters packaged using ___bpf_fill() macro 6825 * @data__sz: @data len, must end in '__sz' for the verifier 6826 * 6827 * Indicate that the BPF scheduler encountered a fatal error and initiate ops 6828 * disabling. 
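 *
 * BPF schedulers normally reach this through a convenience wrapper in the SCX
 * BPF-side headers (commonly named scx_bpf_error()) which packages the format
 * arguments, e.g.:
 *
 *     scx_bpf_error("unexpected dsq 0x%llx", dsq_id);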
6829 */ 6830 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data, 6831 u32 data__sz) 6832 { 6833 struct scx_sched *sch; 6834 unsigned long flags; 6835 6836 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags); 6837 sch = rcu_dereference_bh(scx_root); 6838 if (likely(sch) && 6839 bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0) 6840 scx_exit(sch, SCX_EXIT_ERROR_BPF, 0, "%s", scx_exit_bstr_buf.line); 6841 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags); 6842 } 6843 6844 /** 6845 * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler 6846 * @fmt: format string 6847 * @data: format string parameters packaged using ___bpf_fill() macro 6848 * @data__sz: @data len, must end in '__sz' for the verifier 6849 * 6850 * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and 6851 * dump_task() to generate extra debug dump specific to the BPF scheduler. 6852 * 6853 * The extra dump may be multiple lines. A single line may be split over 6854 * multiple calls. The last line is automatically terminated. 6855 */ 6856 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data, 6857 u32 data__sz) 6858 { 6859 struct scx_sched *sch; 6860 struct scx_dump_data *dd = &scx_dump_data; 6861 struct scx_bstr_buf *buf = &dd->buf; 6862 s32 ret; 6863 6864 guard(rcu)(); 6865 6866 sch = rcu_dereference(scx_root); 6867 if (unlikely(!sch)) 6868 return; 6869 6870 if (raw_smp_processor_id() != dd->cpu) { 6871 scx_error(sch, "scx_bpf_dump() must only be called from ops.dump() and friends"); 6872 return; 6873 } 6874 6875 /* append the formatted string to the line buf */ 6876 ret = __bstr_format(sch, buf->data, buf->line + dd->cursor, 6877 sizeof(buf->line) - dd->cursor, fmt, data, data__sz); 6878 if (ret < 0) { 6879 dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)", 6880 dd->prefix, fmt, data, data__sz, ret); 6881 return; 6882 } 6883 6884 dd->cursor += ret; 6885 dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line)); 6886 6887 if (!dd->cursor) 6888 return; 6889 6890 /* 6891 * If the line buf overflowed or ends in a newline, flush it into the 6892 * dump. This is to allow the caller to generate a single line over 6893 * multiple calls. As ops_dump_flush() can also handle multiple lines in 6894 * the line buf, the only case which can lead to an unexpected 6895 * truncation is when the caller keeps generating newlines in the middle 6896 * instead of the end consecutively. Don't do that. 6897 */ 6898 if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n') 6899 ops_dump_flush(); 6900 } 6901 6902 /** 6903 * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ 6904 * 6905 * Iterate over all of the tasks currently enqueued on the local DSQ of the 6906 * caller's CPU, and re-enqueue them in the BPF scheduler. Can be called from 6907 * anywhere. 6908 */ 6909 __bpf_kfunc void scx_bpf_reenqueue_local___v2(void) 6910 { 6911 struct rq *rq; 6912 6913 guard(preempt)(); 6914 6915 rq = this_rq(); 6916 local_set(&rq->scx.reenq_local_deferred, 1); 6917 schedule_deferred(rq); 6918 } 6919 6920 /** 6921 * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU 6922 * @cpu: CPU of interest 6923 * 6924 * Return the maximum relative capacity of @cpu in relation to the most 6925 * performant CPU in the system. The return value is in the range [1, 6926 * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur(). 
6927 */ 6928 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu) 6929 { 6930 struct scx_sched *sch; 6931 6932 guard(rcu)(); 6933 6934 sch = rcu_dereference(scx_root); 6935 if (likely(sch) && ops_cpu_valid(sch, cpu, NULL)) 6936 return arch_scale_cpu_capacity(cpu); 6937 else 6938 return SCX_CPUPERF_ONE; 6939 } 6940 6941 /** 6942 * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU 6943 * @cpu: CPU of interest 6944 * 6945 * Return the current relative performance of @cpu in relation to its maximum. 6946 * The return value is in the range [1, %SCX_CPUPERF_ONE]. 6947 * 6948 * The current performance level of a CPU in relation to the maximum performance 6949 * available in the system can be calculated as follows: 6950 * 6951 * scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE 6952 * 6953 * The result is in the range [1, %SCX_CPUPERF_ONE]. 6954 */ 6955 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu) 6956 { 6957 struct scx_sched *sch; 6958 6959 guard(rcu)(); 6960 6961 sch = rcu_dereference(scx_root); 6962 if (likely(sch) && ops_cpu_valid(sch, cpu, NULL)) 6963 return arch_scale_freq_capacity(cpu); 6964 else 6965 return SCX_CPUPERF_ONE; 6966 } 6967 6968 /** 6969 * scx_bpf_cpuperf_set - Set the relative performance target of a CPU 6970 * @cpu: CPU of interest 6971 * @perf: target performance level [0, %SCX_CPUPERF_ONE] 6972 * 6973 * Set the target performance level of @cpu to @perf. @perf is in linear 6974 * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the 6975 * schedutil cpufreq governor chooses the target frequency. 6976 * 6977 * The actual performance level chosen, CPU grouping, and the overhead and 6978 * latency of the operations are dependent on the hardware and cpufreq driver in 6979 * use. Consult hardware and cpufreq documentation for more information. The 6980 * current performance level can be monitored using scx_bpf_cpuperf_cur(). 6981 */ 6982 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf) 6983 { 6984 struct scx_sched *sch; 6985 6986 guard(rcu)(); 6987 6988 sch = rcu_dereference(scx_root); 6989 if (unlikely(!sch)) 6990 return; 6991 6992 if (unlikely(perf > SCX_CPUPERF_ONE)) { 6993 scx_error(sch, "Invalid cpuperf target %u for CPU %d", perf, cpu); 6994 return; 6995 } 6996 6997 if (ops_cpu_valid(sch, cpu, NULL)) { 6998 struct rq *rq = cpu_rq(cpu), *locked_rq = scx_locked_rq(); 6999 struct rq_flags rf; 7000 7001 /* 7002 * When called with an rq lock held, restrict the operation 7003 * to the corresponding CPU to prevent ABBA deadlocks. 7004 */ 7005 if (locked_rq && rq != locked_rq) { 7006 scx_error(sch, "Invalid target CPU %d", cpu); 7007 return; 7008 } 7009 7010 /* 7011 * If no rq lock is held, allow to operate on any CPU by 7012 * acquiring the corresponding rq lock. 7013 */ 7014 if (!locked_rq) { 7015 rq_lock_irqsave(rq, &rf); 7016 update_rq_clock(rq); 7017 } 7018 7019 rq->scx.cpuperf_target = perf; 7020 cpufreq_update_util(rq, 0); 7021 7022 if (!locked_rq) 7023 rq_unlock_irqrestore(rq, &rf); 7024 } 7025 } 7026 7027 /** 7028 * scx_bpf_nr_node_ids - Return the number of possible node IDs 7029 * 7030 * All valid node IDs in the system are smaller than the returned value. 7031 */ 7032 __bpf_kfunc u32 scx_bpf_nr_node_ids(void) 7033 { 7034 return nr_node_ids; 7035 } 7036 7037 /** 7038 * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs 7039 * 7040 * All valid CPU IDs in the system are smaller than the returned value. 
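 *
 * Typically used to bound per-CPU walks on the BPF side, e.g. (sketch,
 * assuming the bpf_for() open-coded iterator helper and a hypothetical
 * fetch_cpu_stat()):
 *
 *     s32 cpu;
 *
 *     bpf_for(cpu, 0, scx_bpf_nr_cpu_ids())
 *             total += fetch_cpu_stat(cpu);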
7041 */ 7042 __bpf_kfunc u32 scx_bpf_nr_cpu_ids(void) 7043 { 7044 return nr_cpu_ids; 7045 } 7046 7047 /** 7048 * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask 7049 */ 7050 __bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void) 7051 { 7052 return cpu_possible_mask; 7053 } 7054 7055 /** 7056 * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask 7057 */ 7058 __bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void) 7059 { 7060 return cpu_online_mask; 7061 } 7062 7063 /** 7064 * scx_bpf_put_cpumask - Release a possible/online cpumask 7065 * @cpumask: cpumask to release 7066 */ 7067 __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask) 7068 { 7069 /* 7070 * Empty function body because we aren't actually acquiring or releasing 7071 * a reference to a global cpumask, which is read-only in the caller and 7072 * is never released. The acquire / release semantics here are just used 7073 * to make the cpumask is a trusted pointer in the caller. 7074 */ 7075 } 7076 7077 /** 7078 * scx_bpf_task_running - Is task currently running? 7079 * @p: task of interest 7080 */ 7081 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p) 7082 { 7083 return task_rq(p)->curr == p; 7084 } 7085 7086 /** 7087 * scx_bpf_task_cpu - CPU a task is currently associated with 7088 * @p: task of interest 7089 */ 7090 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p) 7091 { 7092 return task_cpu(p); 7093 } 7094 7095 /** 7096 * scx_bpf_cpu_rq - Fetch the rq of a CPU 7097 * @cpu: CPU of the rq 7098 */ 7099 __bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu) 7100 { 7101 struct scx_sched *sch; 7102 7103 guard(rcu)(); 7104 7105 sch = rcu_dereference(scx_root); 7106 if (unlikely(!sch)) 7107 return NULL; 7108 7109 if (!ops_cpu_valid(sch, cpu, NULL)) 7110 return NULL; 7111 7112 if (!sch->warned_deprecated_rq) { 7113 printk_deferred(KERN_WARNING "sched_ext: %s() is deprecated; " 7114 "use scx_bpf_locked_rq() when holding rq lock " 7115 "or scx_bpf_cpu_curr() to read remote curr safely.\n", __func__); 7116 sch->warned_deprecated_rq = true; 7117 } 7118 7119 return cpu_rq(cpu); 7120 } 7121 7122 /** 7123 * scx_bpf_locked_rq - Return the rq currently locked by SCX 7124 * 7125 * Returns the rq if a rq lock is currently held by SCX. 7126 * Otherwise emits an error and returns NULL. 7127 */ 7128 __bpf_kfunc struct rq *scx_bpf_locked_rq(void) 7129 { 7130 struct scx_sched *sch; 7131 struct rq *rq; 7132 7133 guard(preempt)(); 7134 7135 sch = rcu_dereference_sched(scx_root); 7136 if (unlikely(!sch)) 7137 return NULL; 7138 7139 rq = scx_locked_rq(); 7140 if (!rq) { 7141 scx_error(sch, "accessing rq without holding rq lock"); 7142 return NULL; 7143 } 7144 7145 return rq; 7146 } 7147 7148 /** 7149 * scx_bpf_cpu_curr - Return remote CPU's curr task 7150 * @cpu: CPU of interest 7151 * 7152 * Callers must hold RCU read lock (KF_RCU). 7153 */ 7154 __bpf_kfunc struct task_struct *scx_bpf_cpu_curr(s32 cpu) 7155 { 7156 struct scx_sched *sch; 7157 7158 guard(rcu)(); 7159 7160 sch = rcu_dereference(scx_root); 7161 if (unlikely(!sch)) 7162 return NULL; 7163 7164 if (!ops_cpu_valid(sch, cpu, NULL)) 7165 return NULL; 7166 7167 return rcu_dereference(cpu_rq(cpu)->curr); 7168 } 7169 7170 /** 7171 * scx_bpf_task_cgroup - Return the sched cgroup of a task 7172 * @p: task of interest 7173 * 7174 * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with 7175 * from the scheduler's POV. 
SCX operations should use this function to 7176 * determine @p's current cgroup as, unlike following @p->cgroups, 7177 * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all 7178 * rq-locked operations. Can be called on the parameter tasks of rq-locked 7179 * operations. The restriction guarantees that @p's rq is locked by the caller. 7180 */ 7181 #ifdef CONFIG_CGROUP_SCHED 7182 __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) 7183 { 7184 struct task_group *tg = p->sched_task_group; 7185 struct cgroup *cgrp = &cgrp_dfl_root.cgrp; 7186 struct scx_sched *sch; 7187 7188 guard(rcu)(); 7189 7190 sch = rcu_dereference(scx_root); 7191 if (unlikely(!sch)) 7192 goto out; 7193 7194 if (!scx_kf_allowed_on_arg_tasks(sch, __SCX_KF_RQ_LOCKED, p)) 7195 goto out; 7196 7197 cgrp = tg_cgrp(tg); 7198 7199 out: 7200 cgroup_get(cgrp); 7201 return cgrp; 7202 } 7203 #endif 7204 7205 /** 7206 * scx_bpf_now - Returns a high-performance monotonically non-decreasing 7207 * clock for the current CPU. The clock returned is in nanoseconds. 7208 * 7209 * It provides the following properties: 7210 * 7211 * 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently 7212 * to account for execution time and track tasks' runtime properties. 7213 * Unfortunately, in some hardware platforms, bpf_ktime_get_ns() -- which 7214 * eventually reads a hardware timestamp counter -- is neither performant nor 7215 * scalable. scx_bpf_now() aims to provide a high-performance clock by 7216 * using the rq clock in the scheduler core whenever possible. 7217 * 7218 * 2) High enough resolution for the BPF scheduler use cases: In most BPF 7219 * scheduler use cases, the required clock resolution is lower than the most 7220 * accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically 7221 * uses the rq clock in the scheduler core whenever it is valid. It considers 7222 * that the rq clock is valid from the time the rq clock is updated 7223 * (update_rq_clock) until the rq is unlocked (rq_unpin_lock). 7224 * 7225 * 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now() 7226 * guarantees the clock never goes backward when comparing them in the same 7227 * CPU. On the other hand, when comparing clocks in different CPUs, there 7228 * is no such guarantee -- the clock can go backward. It provides a 7229 * monotonically *non-decreasing* clock so that it would provide the same 7230 * clock values in two different scx_bpf_now() calls in the same CPU 7231 * during the same period of when the rq clock is valid. 7232 */ 7233 __bpf_kfunc u64 scx_bpf_now(void) 7234 { 7235 struct rq *rq; 7236 u64 clock; 7237 7238 preempt_disable(); 7239 7240 rq = this_rq(); 7241 if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) { 7242 /* 7243 * If the rq clock is valid, use the cached rq clock. 7244 * 7245 * Note that scx_bpf_now() is re-entrant between a process 7246 * context and an interrupt context (e.g., timer interrupt). 7247 * However, we don't need to consider the race between them 7248 * because such race is not observable from a caller. 7249 */ 7250 clock = READ_ONCE(rq->scx.clock); 7251 } else { 7252 /* 7253 * Otherwise, return a fresh rq clock. 7254 * 7255 * The rq clock is updated outside of the rq lock. 7256 * In this case, keep the updated rq clock invalid so the next 7257 * kfunc call outside the rq lock gets a fresh rq clock. 
/**
 * scx_bpf_now - Returns a high-performance monotonically non-decreasing
 * clock for the current CPU. The clock returned is in nanoseconds.
 *
 * It provides the following properties:
 *
 * 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently
 * to account for execution time and track tasks' runtime properties.
 * Unfortunately, on some hardware platforms, bpf_ktime_get_ns() -- which
 * eventually reads a hardware timestamp counter -- is neither performant nor
 * scalable. scx_bpf_now() aims to provide a high-performance clock by
 * using the rq clock in the scheduler core whenever possible.
 *
 * 2) High enough resolution for the BPF scheduler use cases: In most BPF
 * scheduler use cases, the required clock resolution is lower than that of the
 * most accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() uses the rq
 * clock in the scheduler core whenever it is valid. It considers the rq clock
 * valid from the time the rq clock is updated (update_rq_clock) until the rq
 * is unlocked (rq_unpin_lock).
 *
 * 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now()
 * guarantees the clock never goes backward when compared on the same CPU.
 * There is no such guarantee across different CPUs -- the clock can go
 * backward. Because the clock is monotonically *non-decreasing* rather than
 * strictly increasing, two scx_bpf_now() calls on the same CPU may return the
 * same value while the same rq clock remains valid.
 */
__bpf_kfunc u64 scx_bpf_now(void)
{
	struct rq *rq;
	u64 clock;

	preempt_disable();

	rq = this_rq();
	if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) {
		/*
		 * If the rq clock is valid, use the cached rq clock.
		 *
		 * Note that scx_bpf_now() is re-entrant between a process
		 * context and an interrupt context (e.g., timer interrupt).
		 * However, we don't need to consider the race between them
		 * because such a race is not observable from a caller.
		 */
		clock = READ_ONCE(rq->scx.clock);
	} else {
		/*
		 * Otherwise, return a fresh rq clock.
		 *
		 * The rq clock is updated outside of the rq lock. In this
		 * case, keep the updated rq clock invalid so the next kfunc
		 * call outside the rq lock gets a fresh rq clock.
		 */
		clock = sched_clock_cpu(cpu_of(rq));
	}

	preempt_enable();

	return clock;
}
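/*
 * Illustrative sketch of BPF-scheduler-side usage (the map, callbacks and
 * header are assumptions, not part of this file): timestamp when a task
 * starts running and report its runtime when it stops. Both callbacks run on
 * the task's CPU, so the timestamps are only ever compared on the same CPU,
 * matching the guarantee described above.
 *
 *	// example.bpf.c, assuming scx/common.bpf.h
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, u64);
 *	} running_at SEC(".maps");
 *
 *	void BPF_STRUCT_OPS(example_running, struct task_struct *p)
 *	{
 *		u64 *at = bpf_task_storage_get(&running_at, p, 0,
 *					       BPF_LOCAL_STORAGE_GET_F_CREATE);
 *		if (at)
 *			*at = scx_bpf_now();
 *	}
 *
 *	void BPF_STRUCT_OPS(example_stopping, struct task_struct *p, bool runnable)
 *	{
 *		u64 *at = bpf_task_storage_get(&running_at, p, 0, 0);
 *
 *		if (at)
 *			bpf_printk("pid=%d ran for %llu ns", p->pid,
 *				   scx_bpf_now() - *at);
 *	}
 */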
static void scx_read_events(struct scx_sched *sch, struct scx_event_stats *events)
{
	struct scx_event_stats *e_cpu;
	int cpu;

	/* Aggregate per-CPU event counters into @events. */
	memset(events, 0, sizeof(*events));
	for_each_possible_cpu(cpu) {
		e_cpu = &per_cpu_ptr(sch->pcpu, cpu)->event_stats;
		scx_agg_event(events, e_cpu, SCX_EV_SELECT_CPU_FALLBACK);
		scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
		scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_KEEP_LAST);
		scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_EXITING);
		scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
		scx_agg_event(events, e_cpu, SCX_EV_REFILL_SLICE_DFL);
		scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DURATION);
		scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DISPATCH);
		scx_agg_event(events, e_cpu, SCX_EV_BYPASS_ACTIVATE);
	}
}

/*
 * scx_bpf_events - Copy the aggregated system-wide event counters to @events
 * @events: output buffer from a BPF program
 * @events__sz: @events len, must end in '__sz' for the verifier
 */
__bpf_kfunc void scx_bpf_events(struct scx_event_stats *events,
				size_t events__sz)
{
	struct scx_sched *sch;
	struct scx_event_stats e_sys;

	rcu_read_lock();
	sch = rcu_dereference(scx_root);
	if (sch)
		scx_read_events(sch, &e_sys);
	else
		memset(&e_sys, 0, sizeof(e_sys));
	rcu_read_unlock();

	/*
	 * We cannot entirely trust a BPF-provided size since a BPF program
	 * might be compiled against a different vmlinux.h, in which
	 * scx_event_stats could be larger (a newer vmlinux.h) or smaller
	 * (an older vmlinux.h). Hence, use the smaller of the two sizes to
	 * avoid memory corruption.
	 */
	events__sz = min(events__sz, sizeof(*events));
	memcpy(events, &e_sys, events__sz);
}
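/*
 * Illustrative sketch of reading the counters from BPF (the program name is
 * an assumption; scx_bpf_events() is registered for BPF_PROG_TYPE_SYSCALL
 * below, so a userspace loader can invoke such a program on demand). Passing
 * sizeof(events) lets the kernel clamp the copy as described above.
 *
 *	// example.bpf.c
 *	SEC("syscall")
 *	int dump_events(void *ctx)
 *	{
 *		struct scx_event_stats events;
 *
 *		scx_bpf_events(&events, sizeof(events));
 *		bpf_printk("select_cpu_fallback=%lld bypass_activate=%lld",
 *			   events.SCX_EV_SELECT_CPU_FALLBACK,
 *			   events.SCX_EV_BYPASS_ACTIVATE);
 *		return 0;
 *	}
 */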
__bpf_kfunc_end_defs();

BTF_KFUNCS_START(scx_kfunc_ids_any)
BTF_ID_FLAGS(func, scx_bpf_task_set_slice, KF_RCU);
BTF_ID_FLAGS(func, scx_bpf_task_set_dsq_vtime, KF_RCU);
BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
BTF_ID_FLAGS(func, scx_bpf_dsq_peek, KF_RCU_PROTECTED | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, scx_bpf_exit_bstr)
BTF_ID_FLAGS(func, scx_bpf_error_bstr)
BTF_ID_FLAGS(func, scx_bpf_dump_bstr)
BTF_ID_FLAGS(func, scx_bpf_reenqueue_local___v2)
BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
BTF_ID_FLAGS(func, scx_bpf_nr_node_ids)
BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
BTF_ID_FLAGS(func, scx_bpf_locked_rq, KF_RET_NULL)
BTF_ID_FLAGS(func, scx_bpf_cpu_curr, KF_RET_NULL | KF_RCU_PROTECTED)
#ifdef CONFIG_CGROUP_SCHED
BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
#endif
BTF_ID_FLAGS(func, scx_bpf_now)
BTF_ID_FLAGS(func, scx_bpf_events)
BTF_KFUNCS_END(scx_kfunc_ids_any)

static const struct btf_kfunc_id_set scx_kfunc_set_any = {
	.owner	= THIS_MODULE,
	.set	= &scx_kfunc_ids_any,
};

static int __init scx_init(void)
{
	int ret;

	/*
	 * kfunc registration can't be done from init_sched_ext_class() as
	 * register_btf_kfunc_id_set() needs most of the system to be up.
	 *
	 * Some kfuncs are context-sensitive and can only be called from
	 * specific SCX ops. They are grouped into BTF sets accordingly.
	 * Unfortunately, BPF currently doesn't have a way of enforcing such
	 * restrictions. Eventually, the verifier should be able to enforce
	 * them. For now, register them the same and make each kfunc explicitly
	 * check using scx_kf_allowed().
	 */
	if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
					     &scx_kfunc_set_enqueue_dispatch)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
					     &scx_kfunc_set_dispatch)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
					     &scx_kfunc_set_cpu_release)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
					     &scx_kfunc_set_unlocked)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
					     &scx_kfunc_set_unlocked)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
					     &scx_kfunc_set_any)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
					     &scx_kfunc_set_any)) ||
	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
					     &scx_kfunc_set_any))) {
		pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
		return ret;
	}

	ret = scx_idle_init();
	if (ret) {
		pr_err("sched_ext: Failed to initialize idle tracking (%d)\n", ret);
		return ret;
	}

	ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
	if (ret) {
		pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
		return ret;
	}

	ret = register_pm_notifier(&scx_pm_notifier);
	if (ret) {
		pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
		return ret;
	}

	scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
	if (!scx_kset) {
		pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
		return -ENOMEM;
	}

	ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
	if (ret < 0) {
		pr_err("sched_ext: Failed to add global attributes\n");
		return ret;
	}

	if (!alloc_cpumask_var(&scx_bypass_lb_donee_cpumask, GFP_KERNEL) ||
	    !alloc_cpumask_var(&scx_bypass_lb_resched_cpumask, GFP_KERNEL)) {
		pr_err("sched_ext: Failed to allocate cpumasks\n");
		return -ENOMEM;
	}

	return 0;
}
__initcall(scx_init);