/* SPDX-License-Identifier: GPL-2.0 */
/*
 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#include <linux/btf_ids.h>
#include "ext_idle.h"

/*
 * NOTE: sched_ext is in the process of growing multiple scheduler support and
 * scx_root usage is in a transitional state. Naked dereferences are safe if the
 * caller is one of the tasks attached to SCX and explicit RCU dereference is
 * necessary otherwise. Naked scx_root dereferences trigger sparse warnings but
 * are used as temporary markers to indicate that the dereferences need to be
 * updated to point to the associated scheduler instances rather than scx_root.
 */
static struct scx_sched __rcu *scx_root;

/*
 * During exit, a task may schedule after losing its PIDs. When disabling the
 * BPF scheduler, we need to be able to iterate tasks in every state to
 * guarantee system safety. Maintain a dedicated task list which contains every
 * task between its fork and eventual free.
 */
static DEFINE_RAW_SPINLOCK(scx_tasks_lock);
static LIST_HEAD(scx_tasks);

/* ops enable/disable */
static DEFINE_MUTEX(scx_enable_mutex);
DEFINE_STATIC_KEY_FALSE(__scx_enabled);
DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
static atomic_t scx_enable_state_var = ATOMIC_INIT(SCX_DISABLED);
static int scx_bypass_depth;
static cpumask_var_t scx_bypass_lb_donee_cpumask;
static cpumask_var_t scx_bypass_lb_resched_cpumask;
static bool scx_aborting;
static bool scx_init_task_enabled;
static bool scx_switching_all;
DEFINE_STATIC_KEY_FALSE(__scx_switched_all);

static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);

/*
 * A monotonically increasing sequence number that is incremented every time a
 * scheduler is enabled. This can be used to check whether any custom sched_ext
 * scheduler has ever been used in the system.
 */
static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);

/*
 * The maximum amount of time in jiffies that a task may be runnable without
 * being scheduled on a CPU. If this timeout is exceeded, it will trigger
 * scx_error().
 */
static unsigned long scx_watchdog_timeout;

/*
 * The last time the delayed work was run. This delayed work relies on
 * ksoftirqd being able to run to service timer interrupts, so it's possible
 * that this work itself could get wedged. To account for this, we check that
 * it's not stalled in the timer tick, and trigger an error if it is.
 */
static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;

static struct delayed_work scx_watchdog_work;

/*
 * For %SCX_KICK_WAIT: Each CPU has a pointer to an array of kick_sync sequence
 * numbers. The arrays are allocated with kvzalloc() as the size can exceed
 * percpu allocator limits on large machines. The allocation is O(nr_cpu_ids^2),
 * performed lazily when enabling and freed when disabling to avoid waste while
 * sched_ext isn't active.
 */
struct scx_kick_syncs {
        struct rcu_head         rcu;
        unsigned long           syncs[];
};

static DEFINE_PER_CPU(struct scx_kick_syncs __rcu *, scx_kick_syncs);
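/*
 * Illustrative sketch (not part of the build): a reader would dereference a
 * CPU's kick_syncs array under RCU before consulting the sequence numbers,
 * e.g.:
 *
 *      struct scx_kick_syncs *ks;
 *
 *      rcu_read_lock();
 *      ks = rcu_dereference(per_cpu(scx_kick_syncs, cpu));
 *      if (ks)
 *              seq = READ_ONCE(ks->syncs[this_cpu]);
 *      rcu_read_unlock();
 *
 * The actual wait/kick protocol lives in the kick machinery further down in
 * this file.
 */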
/*
 * Direct dispatch marker.
 *
 * Non-NULL values are used for direct dispatch from the enqueue path. A valid
 * pointer points to the task currently being enqueued. An ERR_PTR value is
 * used to indicate that direct dispatch has already happened.
 */
static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);

static const struct rhashtable_params dsq_hash_params = {
        .key_len        = sizeof_field(struct scx_dispatch_q, id),
        .key_offset     = offsetof(struct scx_dispatch_q, id),
        .head_offset    = offsetof(struct scx_dispatch_q, hash_node),
};

static LLIST_HEAD(dsqs_to_free);

/* dispatch buf */
struct scx_dsp_buf_ent {
        struct task_struct      *task;
        unsigned long           qseq;
        u64                     dsq_id;
        u64                     enq_flags;
};

static u32 scx_dsp_max_batch;

struct scx_dsp_ctx {
        struct rq               *rq;
        u32                     cursor;
        u32                     nr_tasks;
        struct scx_dsp_buf_ent  buf[];
};

static struct scx_dsp_ctx __percpu *scx_dsp_ctx;

/* string formatting from BPF */
struct scx_bstr_buf {
        u64                     data[MAX_BPRINTF_VARARGS];
        char                    line[SCX_EXIT_MSG_LEN];
};

static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
static struct scx_bstr_buf scx_exit_bstr_buf;

/* ops debug dump */
struct scx_dump_data {
        s32                     cpu;
        bool                    first;
        s32                     cursor;
        struct seq_buf          *s;
        const char              *prefix;
        struct scx_bstr_buf     buf;
};

static struct scx_dump_data scx_dump_data = {
        .cpu = -1,
};

/* /sys/kernel/sched_ext interface */
static struct kset *scx_kset;

/*
 * Parameters that can be adjusted through /sys/module/sched_ext/parameters.
 * There usually is no reason to modify these as normal scheduler operation
 * shouldn't be affected by them. The knobs are primarily for debugging.
 */
static u64 scx_slice_dfl = SCX_SLICE_DFL;
static unsigned int scx_slice_bypass_us = SCX_SLICE_BYPASS / NSEC_PER_USEC;
static unsigned int scx_bypass_lb_intv_us = SCX_BYPASS_LB_DFL_INTV_US;

static int set_slice_us(const char *val, const struct kernel_param *kp)
{
        return param_set_uint_minmax(val, kp, 100, 100 * USEC_PER_MSEC);
}

static const struct kernel_param_ops slice_us_param_ops = {
        .set = set_slice_us,
        .get = param_get_uint,
};

static int set_bypass_lb_intv_us(const char *val, const struct kernel_param *kp)
{
        return param_set_uint_minmax(val, kp, 0, 10 * USEC_PER_SEC);
}

static const struct kernel_param_ops bypass_lb_intv_us_param_ops = {
        .set = set_bypass_lb_intv_us,
        .get = param_get_uint,
};
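/*
 * Usage note (illustrative): once registered below via module_param_cb(),
 * these knobs can be adjusted at runtime through sysfs, subject to the min/max
 * bounds enforced by the setters above, e.g.:
 *
 *      # echo 2000 > /sys/module/sched_ext/parameters/slice_bypass_us
 *      # echo 0 > /sys/module/sched_ext/parameters/bypass_lb_intv_us
 *
 * Writes outside the allowed ranges fail with -EINVAL from
 * param_set_uint_minmax().
 */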
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "sched_ext."

module_param_cb(slice_bypass_us, &slice_us_param_ops, &scx_slice_bypass_us, 0600);
MODULE_PARM_DESC(slice_bypass_us, "bypass slice in microseconds, applied on [un]load (100us to 100ms)");
module_param_cb(bypass_lb_intv_us, &bypass_lb_intv_us_param_ops, &scx_bypass_lb_intv_us, 0600);
MODULE_PARM_DESC(bypass_lb_intv_us, "bypass load balance interval in microseconds (0 (disable) to 10s)");

#undef MODULE_PARAM_PREFIX

#define CREATE_TRACE_POINTS
#include <trace/events/sched_ext.h>

static void process_ddsp_deferred_locals(struct rq *rq);
static u32 reenq_local(struct rq *rq);
static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags);
static bool scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind,
                      s64 exit_code, const char *fmt, va_list args);

static __printf(4, 5) bool scx_exit(struct scx_sched *sch,
                                    enum scx_exit_kind kind, s64 exit_code,
                                    const char *fmt, ...)
{
        va_list args;
        bool ret;

        va_start(args, fmt);
        ret = scx_vexit(sch, kind, exit_code, fmt, args);
        va_end(args);

        return ret;
}

#define scx_error(sch, fmt, args...)    scx_exit((sch), SCX_EXIT_ERROR, 0, fmt, ##args)
#define scx_verror(sch, fmt, args)      scx_vexit((sch), SCX_EXIT_ERROR, 0, fmt, args)

#define SCX_HAS_OP(sch, op)             test_bit(SCX_OP_IDX(op), (sch)->has_op)

static long jiffies_delta_msecs(unsigned long at, unsigned long now)
{
        if (time_after(at, now))
                return jiffies_to_msecs(at - now);
        else
                return -(long)jiffies_to_msecs(now - at);
}

/* if the highest set bit is N, return a mask with bits [N+1, 31] set */
static u32 higher_bits(u32 flags)
{
        return ~((1 << fls(flags)) - 1);
}

/* return the mask with only the highest bit set */
static u32 highest_bit(u32 flags)
{
        int bit = fls(flags);
        return ((u64)1 << bit) >> 1;
}

static bool u32_before(u32 a, u32 b)
{
        return (s32)(a - b) < 0;
}

static struct scx_dispatch_q *find_global_dsq(struct scx_sched *sch,
                                              struct task_struct *p)
{
        return sch->global_dsqs[cpu_to_node(task_cpu(p))];
}

static struct scx_dispatch_q *find_user_dsq(struct scx_sched *sch, u64 dsq_id)
{
        return rhashtable_lookup(&sch->dsq_hash, &dsq_id, dsq_hash_params);
}

static const struct sched_class *scx_setscheduler_class(struct task_struct *p)
{
        if (p->sched_class == &stop_sched_class)
                return &stop_sched_class;

        return __setscheduler_class(p->policy, p->prio);
}

/*
 * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
 * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
 * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check
 * whether it's running from an allowed context.
 *
 * @mask is constant, always inline to cull the mask calculations.
 */
static __always_inline void scx_kf_allow(u32 mask)
{
        /* nesting is allowed only in increasing scx_kf_mask order */
        WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
                  "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
                  current->scx.kf_mask, mask);
        current->scx.kf_mask |= mask;
        barrier();
}

static void scx_kf_disallow(u32 mask)
{
        barrier();
        current->scx.kf_mask &= ~mask;
}
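/*
 * Illustrative sketch (not part of the build): the allow/disallow pair
 * brackets an ops invocation so that kfuncs called from inside it can verify
 * their context, conceptually:
 *
 *      scx_kf_allow(SCX_KF_DISPATCH);
 *      sch->ops.dispatch(cpu, prev);           // may call dispatch-only kfuncs
 *      scx_kf_disallow(SCX_KF_DISPATCH);
 *
 * A dispatch-only kfunc then starts with scx_kf_allowed(sch, SCX_KF_DISPATCH)
 * and bails out if the bit isn't set in current->scx.kf_mask. In practice, the
 * bracketing is done through the SCX_CALL_OP*() macros below, not open-coded.
 */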
/*
 * Track the rq currently locked.
 *
 * This allows kfuncs to safely operate on rq from any scx ops callback,
 * knowing which rq is already locked.
 */
DEFINE_PER_CPU(struct rq *, scx_locked_rq_state);

static inline void update_locked_rq(struct rq *rq)
{
        /*
         * Check whether @rq is actually locked. This can help expose bugs
         * or incorrect assumptions about the context in which a kfunc or
         * callback is executed.
         */
        if (rq)
                lockdep_assert_rq_held(rq);
        __this_cpu_write(scx_locked_rq_state, rq);
}

#define SCX_CALL_OP(sch, mask, op, rq, args...)                         \
do {                                                                    \
        if (rq)                                                         \
                update_locked_rq(rq);                                   \
        if (mask) {                                                     \
                scx_kf_allow(mask);                                     \
                (sch)->ops.op(args);                                    \
                scx_kf_disallow(mask);                                  \
        } else {                                                        \
                (sch)->ops.op(args);                                    \
        }                                                               \
        if (rq)                                                         \
                update_locked_rq(NULL);                                 \
} while (0)

#define SCX_CALL_OP_RET(sch, mask, op, rq, args...)                     \
({                                                                      \
        __typeof__((sch)->ops.op(args)) __ret;                          \
                                                                        \
        if (rq)                                                         \
                update_locked_rq(rq);                                   \
        if (mask) {                                                     \
                scx_kf_allow(mask);                                     \
                __ret = (sch)->ops.op(args);                            \
                scx_kf_disallow(mask);                                  \
        } else {                                                        \
                __ret = (sch)->ops.op(args);                            \
        }                                                               \
        if (rq)                                                         \
                update_locked_rq(NULL);                                 \
        __ret;                                                          \
})

/*
 * Some kfuncs are allowed only on the tasks that are subjects of the
 * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such
 * restrictions, the following SCX_CALL_OP_*() variants should be used when
 * invoking scx_ops operations that take task arguments. These can only be used
 * for non-nesting operations due to the way the tasks are tracked.
 *
 * kfuncs which can only operate on such tasks can in turn use
 * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
 * the specific task.
 */
#define SCX_CALL_OP_TASK(sch, mask, op, rq, task, args...)              \
do {                                                                    \
        BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);                      \
        current->scx.kf_tasks[0] = task;                                \
        SCX_CALL_OP((sch), mask, op, rq, task, ##args);                 \
        current->scx.kf_tasks[0] = NULL;                                \
} while (0)

#define SCX_CALL_OP_TASK_RET(sch, mask, op, rq, task, args...)          \
({                                                                      \
        __typeof__((sch)->ops.op(task, ##args)) __ret;                  \
        BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);                      \
        current->scx.kf_tasks[0] = task;                                \
        __ret = SCX_CALL_OP_RET((sch), mask, op, rq, task, ##args);     \
        current->scx.kf_tasks[0] = NULL;                                \
        __ret;                                                          \
})

#define SCX_CALL_OP_2TASKS_RET(sch, mask, op, rq, task0, task1, args...) \
({                                                                      \
        __typeof__((sch)->ops.op(task0, task1, ##args)) __ret;          \
        BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);                      \
        current->scx.kf_tasks[0] = task0;                               \
        current->scx.kf_tasks[1] = task1;                               \
        __ret = SCX_CALL_OP_RET((sch), mask, op, rq, task0, task1, ##args); \
        current->scx.kf_tasks[0] = NULL;                                \
        current->scx.kf_tasks[1] = NULL;                                \
        __ret;                                                          \
})
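/*
 * Illustrative sketch (not part of the build): a task-argument op is invoked
 * through the variants above so that @p is recorded as the operation's
 * subject, e.g.:
 *
 *      SCX_CALL_OP_TASK(sch, SCX_KF_REST, runnable, rq, p, enq_flags);
 *
 * While ops.runnable() runs, kfuncs restricted to the subject task succeed
 * only when passed @p, which scx_kf_allowed_on_arg_tasks() below enforces by
 * comparing against current->scx.kf_tasks[].
 */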
/* @mask is constant, always inline to cull unnecessary branches */
static __always_inline bool scx_kf_allowed(struct scx_sched *sch, u32 mask)
{
        if (unlikely(!(current->scx.kf_mask & mask))) {
                scx_error(sch, "kfunc with mask 0x%x called from an operation only allowing 0x%x",
                          mask, current->scx.kf_mask);
                return false;
        }

        /*
         * Enforce nesting boundaries. e.g. A kfunc which can be called from
         * DISPATCH must not be called if we're running DEQUEUE which is nested
         * inside ops.dispatch(). We don't need to check boundaries for any
         * blocking kfuncs as the verifier ensures they're only called from
         * sleepable progs.
         */
        if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
                     (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
                scx_error(sch, "cpu_release kfunc called from a nested operation");
                return false;
        }

        if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
                     (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
                scx_error(sch, "dispatch kfunc called from a nested operation");
                return false;
        }

        return true;
}

/* see SCX_CALL_OP_TASK() */
static __always_inline bool scx_kf_allowed_on_arg_tasks(struct scx_sched *sch,
                                                        u32 mask,
                                                        struct task_struct *p)
{
        if (!scx_kf_allowed(sch, mask))
                return false;

        if (unlikely((p != current->scx.kf_tasks[0] &&
                      p != current->scx.kf_tasks[1]))) {
                scx_error(sch, "called on a task not being operated on");
                return false;
        }

        return true;
}

/**
 * nldsq_next_task - Iterate to the next task in a non-local DSQ
 * @dsq: user dsq being iterated
 * @cur: current position, %NULL to start iteration
 * @rev: walk backwards
 *
 * Returns %NULL when iteration is finished.
 */
static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
                                           struct task_struct *cur, bool rev)
{
        struct list_head *list_node;
        struct scx_dsq_list_node *dsq_lnode;

        lockdep_assert_held(&dsq->lock);

        if (cur)
                list_node = &cur->scx.dsq_list.node;
        else
                list_node = &dsq->list;

        /* find the next task, need to skip BPF iteration cursors */
        do {
                if (rev)
                        list_node = list_node->prev;
                else
                        list_node = list_node->next;

                if (list_node == &dsq->list)
                        return NULL;

                dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
                                         node);
        } while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);

        return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
}

#define nldsq_for_each_task(p, dsq)                                     \
        for ((p) = nldsq_next_task((dsq), NULL, false); (p);            \
             (p) = nldsq_next_task((dsq), (p), false))

/*
 * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
 * dispatch order. BPF-visible iterator is opaque and larger to allow future
 * changes without breaking backward compatibility. Can be used with
 * bpf_for_each(). See bpf_iter_scx_dsq_*().
 */
enum scx_dsq_iter_flags {
        /* iterate in the reverse dispatch order */
        SCX_DSQ_ITER_REV                = 1U << 16,

        __SCX_DSQ_ITER_HAS_SLICE        = 1U << 30,
        __SCX_DSQ_ITER_HAS_VTIME        = 1U << 31,

        __SCX_DSQ_ITER_USER_FLAGS       = SCX_DSQ_ITER_REV,
        __SCX_DSQ_ITER_ALL_FLAGS        = __SCX_DSQ_ITER_USER_FLAGS |
                                          __SCX_DSQ_ITER_HAS_SLICE |
                                          __SCX_DSQ_ITER_HAS_VTIME,
};

struct bpf_iter_scx_dsq_kern {
        struct scx_dsq_list_node        cursor;
        struct scx_dispatch_q           *dsq;
        u64                             slice;
        u64                             vtime;
} __attribute__((aligned(8)));

struct bpf_iter_scx_dsq {
        u64                             __opaque[6];
} __attribute__((aligned(8)));
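/*
 * Illustrative sketch (assuming a BPF scheduler built against these kfuncs):
 * the opaque iterator is driven from the BPF side with bpf_for_each(), e.g.:
 *
 *      struct task_struct *p;
 *
 *      bpf_for_each(scx_dsq, p, dsq_id, 0) {
 *              // visited in dispatch order; pass SCX_DSQ_ITER_REV as the
 *              // last argument to walk backwards instead
 *              bpf_printk("%s[%d]", p->comm, p->pid);
 *      }
 */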
/*
 * SCX task iterator.
 */
struct scx_task_iter {
        struct sched_ext_entity         cursor;
        struct task_struct              *locked_task;
        struct rq                       *rq;
        struct rq_flags                 rf;
        u32                             cnt;
        bool                            list_locked;
};

/**
 * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration
 * @iter: iterator to init
 *
 * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter
 * must eventually be stopped with scx_task_iter_stop().
 *
 * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock()
 * between this and the first next() call or between any two next() calls. If
 * the locks are released between two next() calls, the caller is responsible
 * for ensuring that the task being iterated remains accessible either through
 * RCU read lock or obtaining a reference count.
 *
 * All tasks which existed when the iteration started are guaranteed to be
 * visited as long as they are not dead.
 */
static void scx_task_iter_start(struct scx_task_iter *iter)
{
        memset(iter, 0, sizeof(*iter));

        raw_spin_lock_irq(&scx_tasks_lock);

        iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
        list_add(&iter->cursor.tasks_node, &scx_tasks);
        iter->list_locked = true;
}

static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
{
        if (iter->locked_task) {
                task_rq_unlock(iter->rq, iter->locked_task, &iter->rf);
                iter->locked_task = NULL;
        }
}

/**
 * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator
 * @iter: iterator to unlock
 *
 * If @iter is in the middle of a locked iteration, it may be locking the rq of
 * the task currently being visited in addition to scx_tasks_lock. Unlock both.
 * This function can be safely called anytime during an iteration. The next
 * iterator operation will automatically restore the necessary locking.
 */
static void scx_task_iter_unlock(struct scx_task_iter *iter)
{
        __scx_task_iter_rq_unlock(iter);
        if (iter->list_locked) {
                iter->list_locked = false;
                raw_spin_unlock_irq(&scx_tasks_lock);
        }
}

static void __scx_task_iter_maybe_relock(struct scx_task_iter *iter)
{
        if (!iter->list_locked) {
                raw_spin_lock_irq(&scx_tasks_lock);
                iter->list_locked = true;
        }
}

/**
 * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
 * @iter: iterator to exit
 *
 * Exit a previously initialized @iter. Must be called with scx_tasks_lock held
 * which is released on return. If the iterator holds a task's rq lock, that rq
 * lock is also released. See scx_task_iter_start() for details.
 */
static void scx_task_iter_stop(struct scx_task_iter *iter)
{
        __scx_task_iter_maybe_relock(iter);
        list_del_init(&iter->cursor.tasks_node);
        scx_task_iter_unlock(iter);
}
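/*
 * Illustrative sketch (not part of the build): the iterator is typically
 * driven as below, e.g. when switching all tasks into or out of SCX:
 *
 *      struct scx_task_iter sti;
 *      struct task_struct *p;
 *
 *      scx_task_iter_start(&sti);
 *      while ((p = scx_task_iter_next_locked(&sti))) {
 *              // @p's rq is locked here; operate on @p
 *      }
 *      scx_task_iter_stop(&sti);
 *
 * The cursor node planted in scx_tasks keeps the position stable while locks
 * are dropped and re-acquired between batches.
 */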
/**
 * scx_task_iter_next - Next task
 * @iter: iterator to walk
 *
 * Visit the next task. See scx_task_iter_start() for details. Locks are dropped
 * and re-acquired every %SCX_TASK_ITER_BATCH iterations to avoid causing stalls
 * by holding scx_tasks_lock for too long.
 */
static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
{
        struct list_head *cursor = &iter->cursor.tasks_node;
        struct sched_ext_entity *pos;

        if (!(++iter->cnt % SCX_TASK_ITER_BATCH)) {
                scx_task_iter_unlock(iter);
                cond_resched();
        }

        __scx_task_iter_maybe_relock(iter);

        list_for_each_entry(pos, cursor, tasks_node) {
                if (&pos->tasks_node == &scx_tasks)
                        return NULL;
                if (!(pos->flags & SCX_TASK_CURSOR)) {
                        list_move(cursor, &pos->tasks_node);
                        return container_of(pos, struct task_struct, scx);
                }
        }

        /* can't happen, should always terminate at scx_tasks above */
        BUG();
}

/**
 * scx_task_iter_next_locked - Next non-idle task with its rq locked
 * @iter: iterator to walk
 *
 * Visit the next non-idle task with its rq lock held. See
 * scx_task_iter_start() for details.
 */
static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
{
        struct task_struct *p;

        __scx_task_iter_rq_unlock(iter);

        while ((p = scx_task_iter_next(iter))) {
                /*
                 * scx_task_iter is used to prepare and move tasks into SCX
                 * while loading the BPF scheduler and vice-versa while
                 * unloading. The init_tasks ("swappers") should be excluded
                 * from the iteration because:
                 *
                 * - It's unsafe to use __setscheduler_class() on an init_task
                 *   to determine the sched_class to use as it won't preserve
                 *   its idle_sched_class.
                 *
                 * - ops.init/exit_task() can easily be confused if called with
                 *   init_tasks as they, e.g., share PID 0.
                 *
                 * As init_tasks are never scheduled through SCX, they can be
                 * skipped safely. Note that is_idle_task() which tests %PF_IDLE
                 * doesn't work here:
                 *
                 * - %PF_IDLE may not be set for an init_task whose CPU hasn't
                 *   yet been onlined.
                 *
                 * - %PF_IDLE can be set on tasks that are not init_tasks. See
                 *   play_idle_precise() used by CONFIG_IDLE_INJECT.
                 *
                 * Test for idle_sched_class as only init_tasks are on it.
                 */
                if (p->sched_class != &idle_sched_class)
                        break;
        }
        if (!p)
                return NULL;

        iter->rq = task_rq_lock(p, &iter->rf);
        iter->locked_task = p;

        return p;
}

/**
 * scx_add_event - Increase an event counter for 'name' by 'cnt'
 * @sch: scx_sched to account events for
 * @name: an event name defined in struct scx_event_stats
 * @cnt: the number of times the event occurred
 *
 * This can be used when preemption is not disabled.
 */
#define scx_add_event(sch, name, cnt) do {                              \
        this_cpu_add((sch)->pcpu->event_stats.name, (cnt));             \
        trace_sched_ext_event(#name, (cnt));                            \
} while (0)
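/*
 * Usage note (illustrative): scx_add_event() is safe with preemption enabled
 * because this_cpu_add() is preemption-safe, e.g.:
 *
 *      scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);
 *
 * Hot paths that already run with preemption disabled (enqueue, dispatch) use
 * the cheaper __scx_add_event() variant defined below.
 */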
/**
 * __scx_add_event - Increase an event counter for 'name' by 'cnt'
 * @sch: scx_sched to account events for
 * @name: an event name defined in struct scx_event_stats
 * @cnt: the number of times the event occurred
 *
 * This should be used only when preemption is disabled.
 */
#define __scx_add_event(sch, name, cnt) do {                            \
        __this_cpu_add((sch)->pcpu->event_stats.name, (cnt));           \
        trace_sched_ext_event(#name, (cnt));                            \
} while (0)

/**
 * scx_agg_event - Aggregate an event counter 'kind' from 'src_e' to 'dst_e'
 * @dst_e: destination event stats
 * @src_e: source event stats
 * @kind: a kind of event to be aggregated
 */
#define scx_agg_event(dst_e, src_e, kind) do {                          \
        (dst_e)->kind += READ_ONCE((src_e)->kind);                      \
} while (0)

/**
 * scx_dump_event - Dump an event 'kind' in 'events' to 's'
 * @s: output seq_buf
 * @events: event stats
 * @kind: a kind of event to dump
 */
#define scx_dump_event(s, events, kind) do {                            \
        dump_line(&(s), "%40s: %16lld", #kind, (events)->kind);         \
} while (0)

static void scx_read_events(struct scx_sched *sch,
                            struct scx_event_stats *events);

static enum scx_enable_state scx_enable_state(void)
{
        return atomic_read(&scx_enable_state_var);
}

static enum scx_enable_state scx_set_enable_state(enum scx_enable_state to)
{
        return atomic_xchg(&scx_enable_state_var, to);
}

static bool scx_tryset_enable_state(enum scx_enable_state to,
                                    enum scx_enable_state from)
{
        int from_v = from;

        return atomic_try_cmpxchg(&scx_enable_state_var, &from_v, to);
}

/**
 * wait_ops_state - Busy-wait the specified ops state to end
 * @p: target task
 * @opss: state to wait the end of
 *
 * Busy-wait for @p to transition out of @opss. This can only be used when the
 * state part of @opss is %SCX_QUEUEING or %SCX_DISPATCHING. This function also
 * has load_acquire semantics to ensure that the caller can see the updates made
 * in the enqueueing and dispatching paths.
 */
static void wait_ops_state(struct task_struct *p, unsigned long opss)
{
        do {
                cpu_relax();
        } while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
}

static inline bool __cpu_valid(s32 cpu)
{
        return likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu));
}

/**
 * ops_cpu_valid - Verify a cpu number, to be used on ops input args
 * @sch: scx_sched to abort on error
 * @cpu: cpu number which came from a BPF ops
 * @where: extra information reported on error
 *
 * @cpu is a cpu number which came from the BPF scheduler and can be any value.
 * Verify that it is in range and one of the possible cpus. If invalid, trigger
 * an ops error.
 */
static bool ops_cpu_valid(struct scx_sched *sch, s32 cpu, const char *where)
{
        if (__cpu_valid(cpu)) {
                return true;
        } else {
                scx_error(sch, "invalid CPU %d%s%s", cpu, where ? " " : "", where ?: "");
                return false;
        }
}
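/*
 * Illustrative sketch (not part of the build) of the failure mode
 * ops_sanitize_err() below guards against: a rogue value such as -70000 lies
 * outside [-MAX_ERRNO, -1], so its ERR_PTR() encoding escapes IS_ERR():
 *
 *      void *p = ERR_PTR(-70000);
 *
 *      IS_ERR(p);      // false: -70000 is below -MAX_ERRNO (-4095)
 *      // @p would then be dereferenced as a valid pointer - oops
 */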
/**
 * ops_sanitize_err - Sanitize a -errno value
 * @sch: scx_sched to error out on error
 * @ops_name: operation to blame on failure
 * @err: -errno value to sanitize
 *
 * Verify @err is a valid -errno. If not, trigger scx_error() and return
 * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
 * cause misbehaviors. For example, a large negative return from
 * ops.init_task() triggers an oops when passed up the call chain because the
 * value fails the IS_ERR() test after being encoded with ERR_PTR() and then is
 * handled as a pointer.
 */
static int ops_sanitize_err(struct scx_sched *sch, const char *ops_name, s32 err)
{
        if (err < 0 && err >= -MAX_ERRNO)
                return err;

        scx_error(sch, "ops.%s() returned an invalid errno %d", ops_name, err);
        return -EPROTO;
}

static void run_deferred(struct rq *rq)
{
        process_ddsp_deferred_locals(rq);

        if (local_read(&rq->scx.reenq_local_deferred)) {
                local_set(&rq->scx.reenq_local_deferred, 0);
                reenq_local(rq);
        }
}

static void deferred_bal_cb_workfn(struct rq *rq)
{
        run_deferred(rq);
}

static void deferred_irq_workfn(struct irq_work *irq_work)
{
        struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);

        raw_spin_rq_lock(rq);
        run_deferred(rq);
        raw_spin_rq_unlock(rq);
}

/**
 * schedule_deferred - Schedule execution of deferred actions on an rq
 * @rq: target rq
 *
 * Schedule execution of deferred actions on @rq. Deferred actions are executed
 * with @rq locked but unpinned, and thus can unlock @rq to e.g. migrate tasks
 * to other rqs.
 */
static void schedule_deferred(struct rq *rq)
{
        /*
         * Queue an irq work. It is executed on IRQ re-enable, which may take a
         * bit longer than the scheduler hook in schedule_deferred_locked().
         */
        irq_work_queue(&rq->scx.deferred_irq_work);
}

/**
 * schedule_deferred_locked - Schedule execution of deferred actions on an rq
 * @rq: target rq
 *
 * Schedule execution of deferred actions on @rq. Equivalent to
 * schedule_deferred() but requires @rq to be locked and can be more efficient.
 */
static void schedule_deferred_locked(struct rq *rq)
{
        lockdep_assert_rq_held(rq);

        /*
         * If in the middle of waking up a task, task_woken_scx() will be called
         * afterwards which will then run the deferred actions, no need to
         * schedule anything.
         */
        if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
                return;

        /* Don't do anything if there already is a deferred operation. */
        if (rq->scx.flags & SCX_RQ_BAL_CB_PENDING)
                return;

        /*
         * If in balance, the balance callbacks will be called before the rq
         * lock is released. Schedule one.
         *
         * We can't directly insert the callback into the rq's list: the
         * callback can drop the rq lock and make the pending balance callback
         * visible to unrelated code paths that call rq_pin_lock().
         *
         * Just let balance_one() know that it must do it itself.
         */
        if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
                rq->scx.flags |= SCX_RQ_BAL_CB_PENDING;
                return;
        }

        /*
         * No scheduler hooks available. Use the generic irq_work path. The
         * above WAKEUP and BALANCE paths should cover most of the cases and the
         * time to IRQ re-enable shouldn't be long.
         */
        schedule_deferred(rq);
}
/**
 * touch_core_sched - Update timestamp used for core-sched task ordering
 * @rq: rq to read clock from, must be locked
 * @p: task to update the timestamp for
 *
 * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
 * implement global or local-DSQ FIFO ordering for core-sched. Should be called
 * when a task becomes runnable and its turn on the CPU ends (e.g. slice
 * exhaustion).
 */
static void touch_core_sched(struct rq *rq, struct task_struct *p)
{
        lockdep_assert_rq_held(rq);

#ifdef CONFIG_SCHED_CORE
        /*
         * It's okay to update the timestamp spuriously. Use
         * sched_core_disabled() which is cheaper than enabled().
         *
         * As this is used to determine ordering between tasks of sibling CPUs,
         * it may be better to use per-core dispatch sequence instead.
         */
        if (!sched_core_disabled())
                p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
#endif
}

/**
 * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
 * @rq: rq to read clock from, must be locked
 * @p: task being dispatched
 *
 * If the BPF scheduler implements custom core-sched ordering via
 * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
 * ordering within each local DSQ. This function is called from dispatch paths
 * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
 */
static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
{
        lockdep_assert_rq_held(rq);

#ifdef CONFIG_SCHED_CORE
        if (unlikely(SCX_HAS_OP(scx_root, core_sched_before)))
                touch_core_sched(rq, p);
#endif
}

static void update_curr_scx(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        s64 delta_exec;

        delta_exec = update_curr_common(rq);
        if (unlikely(delta_exec <= 0))
                return;

        if (curr->scx.slice != SCX_SLICE_INF) {
                curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
                if (!curr->scx.slice)
                        touch_core_sched(rq, curr);
        }
}

static bool scx_dsq_priq_less(struct rb_node *node_a,
                              const struct rb_node *node_b)
{
        const struct task_struct *a =
                container_of(node_a, struct task_struct, scx.dsq_priq);
        const struct task_struct *b =
                container_of(node_b, struct task_struct, scx.dsq_priq);

        return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
}

static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
{
        /* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
        WRITE_ONCE(dsq->nr, dsq->nr + delta);
}

static void refill_task_slice_dfl(struct scx_sched *sch, struct task_struct *p)
{
        p->scx.slice = READ_ONCE(scx_slice_dfl);
        __scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1);
}
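/*
 * Illustrative sketch (assuming a BPF scheduler): a user DSQ is kept either
 * FIFO-ordered or vtime-ordered depending on which insertion kfunc the
 * scheduler uses, and dispatch_enqueue() below rejects mixing the two:
 *
 *      // FIFO: appends to dsq->list
 *      scx_bpf_dsq_insert(p, MY_DSQ_ID, slice_ns, 0);
 *
 *      // vtime (PRIQ): inserts into dsq->priq ordered by @p->scx.dsq_vtime
 *      scx_bpf_dsq_insert_vtime(p, MY_DSQ_ID, slice_ns, vtime, 0);
 *
 * MY_DSQ_ID is a placeholder for a DSQ previously created with
 * scx_bpf_create_dsq().
 */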
static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
                             struct task_struct *p, u64 enq_flags)
{
        bool is_local = dsq->id == SCX_DSQ_LOCAL;

        WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
        WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
                     !RB_EMPTY_NODE(&p->scx.dsq_priq));

        if (!is_local) {
                raw_spin_lock_nested(&dsq->lock,
                                     (enq_flags & SCX_ENQ_NESTED) ? SINGLE_DEPTH_NESTING : 0);

                if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
                        scx_error(sch, "attempting to dispatch to a destroyed dsq");
                        /* fall back to the global dsq */
                        raw_spin_unlock(&dsq->lock);
                        dsq = find_global_dsq(sch, p);
                        raw_spin_lock(&dsq->lock);
                }
        }

        if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
                     (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
                /*
                 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
                 * their FIFO queues. To avoid confusion and accidentally
                 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we
                 * disallow any internal DSQ from doing vtime ordering of
                 * tasks.
                 */
                scx_error(sch, "cannot use vtime ordering for built-in DSQs");
                enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
        }

        if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
                struct rb_node *rbp;

                /*
                 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
                 * linked to both the rbtree and list on PRIQs, this can only be
                 * tested easily when adding the first task.
                 */
                if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
                             nldsq_next_task(dsq, NULL, false)))
                        scx_error(sch, "DSQ ID 0x%016llx already had FIFO-enqueued tasks",
                                  dsq->id);

                p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
                rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);

                /*
                 * Find the previous task and insert after it on the list so
                 * that @dsq->list is vtime ordered.
                 */
                rbp = rb_prev(&p->scx.dsq_priq);
                if (rbp) {
                        struct task_struct *prev =
                                container_of(rbp, struct task_struct,
                                             scx.dsq_priq);
                        list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
                        /* first task unchanged - no update needed */
                } else {
                        list_add(&p->scx.dsq_list.node, &dsq->list);
                        /* not builtin and new task is at head - use fastpath */
                        rcu_assign_pointer(dsq->first_task, p);
                }
        } else {
                /* a FIFO DSQ shouldn't be using PRIQ enqueuing */
                if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
                        scx_error(sch, "DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
                                  dsq->id);

                if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) {
                        list_add(&p->scx.dsq_list.node, &dsq->list);
                        /* new task inserted at head - use fastpath */
                        if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN))
                                rcu_assign_pointer(dsq->first_task, p);
                } else {
                        bool was_empty;

                        was_empty = list_empty(&dsq->list);
                        list_add_tail(&p->scx.dsq_list.node, &dsq->list);
                        if (was_empty && !(dsq->id & SCX_DSQ_FLAG_BUILTIN))
                                rcu_assign_pointer(dsq->first_task, p);
                }
        }

        /* seq records the order tasks are queued, used by BPF DSQ iterator */
        dsq->seq++;
        p->scx.dsq_seq = dsq->seq;

        dsq_mod_nr(dsq, 1);
        p->scx.dsq = dsq;
        /*
         * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
         * direct dispatch path, but we clear them here because the direct
         * dispatch verdict may be overridden on the enqueue path during e.g.
         * bypass.
         */
        p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
        p->scx.ddsp_enq_flags = 0;

        /*
         * We're transitioning out of QUEUEING or DISPATCHING. store_release to
         * match waiters' load_acquire.
         */
        if (enq_flags & SCX_ENQ_CLEAR_OPSS)
                atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);

        if (is_local) {
                struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
                bool preempt = false;

                if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
                    rq->curr->sched_class == &ext_sched_class) {
                        rq->curr->scx.slice = 0;
                        preempt = true;
                }

                if (preempt || sched_class_above(&ext_sched_class,
                                                 rq->curr->sched_class))
                        resched_curr(rq);
        } else {
                raw_spin_unlock(&dsq->lock);
        }
}

static void task_unlink_from_dsq(struct task_struct *p,
                                 struct scx_dispatch_q *dsq)
{
        WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));

        if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
                rb_erase(&p->scx.dsq_priq, &dsq->priq);
                RB_CLEAR_NODE(&p->scx.dsq_priq);
                p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
        }

        list_del_init(&p->scx.dsq_list.node);
        dsq_mod_nr(dsq, -1);

        if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN) && dsq->first_task == p) {
                struct task_struct *first_task;

                first_task = nldsq_next_task(dsq, NULL, false);
                rcu_assign_pointer(dsq->first_task, first_task);
        }
}

static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
{
        struct scx_dispatch_q *dsq = p->scx.dsq;
        bool is_local = dsq == &rq->scx.local_dsq;

        lockdep_assert_rq_held(rq);

        if (!dsq) {
                /*
                 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
                 * Unlinking is all that's needed to cancel.
                 */
                if (unlikely(!list_empty(&p->scx.dsq_list.node)))
                        list_del_init(&p->scx.dsq_list.node);

                /*
                 * When dispatching directly from the BPF scheduler to a local
                 * DSQ, the task isn't associated with any DSQ but
                 * @p->scx.holding_cpu may be set under the protection of
                 * %SCX_OPSS_DISPATCHING.
                 */
                if (p->scx.holding_cpu >= 0)
                        p->scx.holding_cpu = -1;

                return;
        }

        if (!is_local)
                raw_spin_lock(&dsq->lock);

        /*
         * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
         * change underneath us.
         */
        if (p->scx.holding_cpu < 0) {
                /* @p must still be on @dsq, dequeue */
                task_unlink_from_dsq(p, dsq);
        } else {
                /*
                 * We're racing against dispatch_to_local_dsq() which already
                 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
                 * holding_cpu which tells dispatch_to_local_dsq() that it lost
                 * the race.
                 */
                WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
                p->scx.holding_cpu = -1;
        }
        p->scx.dsq = NULL;

        if (!is_local)
                raw_spin_unlock(&dsq->lock);
}
/*
 * Abbreviated version of dispatch_dequeue() that can be used when both @p's rq
 * and dsq are locked.
 */
static void dispatch_dequeue_locked(struct task_struct *p,
                                    struct scx_dispatch_q *dsq)
{
        lockdep_assert_rq_held(task_rq(p));
        lockdep_assert_held(&dsq->lock);

        task_unlink_from_dsq(p, dsq);
        p->scx.dsq = NULL;
}

static struct scx_dispatch_q *find_dsq_for_dispatch(struct scx_sched *sch,
                                                    struct rq *rq, u64 dsq_id,
                                                    struct task_struct *p)
{
        struct scx_dispatch_q *dsq;

        if (dsq_id == SCX_DSQ_LOCAL)
                return &rq->scx.local_dsq;

        if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
                s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;

                if (!ops_cpu_valid(sch, cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
                        return find_global_dsq(sch, p);

                return &cpu_rq(cpu)->scx.local_dsq;
        }

        if (dsq_id == SCX_DSQ_GLOBAL)
                dsq = find_global_dsq(sch, p);
        else
                dsq = find_user_dsq(sch, dsq_id);

        if (unlikely(!dsq)) {
                scx_error(sch, "non-existent DSQ 0x%llx for %s[%d]",
                          dsq_id, p->comm, p->pid);
                return find_global_dsq(sch, p);
        }

        return dsq;
}

static void mark_direct_dispatch(struct scx_sched *sch,
                                 struct task_struct *ddsp_task,
                                 struct task_struct *p, u64 dsq_id,
                                 u64 enq_flags)
{
        /*
         * Mark that dispatch already happened from ops.select_cpu() or
         * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
         * which can never match a valid task pointer.
         */
        __this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));

        /* @p must match the task on the enqueue path */
        if (unlikely(p != ddsp_task)) {
                if (IS_ERR(ddsp_task))
                        scx_error(sch, "%s[%d] already direct-dispatched",
                                  p->comm, p->pid);
                else
                        scx_error(sch, "scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
                                  ddsp_task->comm, ddsp_task->pid,
                                  p->comm, p->pid);
                return;
        }

        WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
        WARN_ON_ONCE(p->scx.ddsp_enq_flags);

        p->scx.ddsp_dsq_id = dsq_id;
        p->scx.ddsp_enq_flags = enq_flags;
}
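/*
 * Illustrative sketch (assuming a BPF scheduler): a direct dispatch decides
 * the destination DSQ while still in ops.select_cpu() or ops.enqueue(),
 * skipping a later ops.dispatch() round trip. The verdict is recorded by
 * mark_direct_dispatch() above and executed by direct_dispatch() below:
 *
 *      s32 BPF_STRUCT_OPS(my_select_cpu, struct task_struct *p,
 *                         s32 prev_cpu, u64 wake_flags)
 *      {
 *              if (scx_bpf_test_and_clear_cpu_idle(prev_cpu))
 *                      // idle prev_cpu: insert straight into its local DSQ
 *                      scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *              return prev_cpu;
 *      }
 */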
static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
                            u64 enq_flags)
{
        struct rq *rq = task_rq(p);
        struct scx_dispatch_q *dsq =
                find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);

        touch_core_sched_dispatch(rq, p);

        p->scx.ddsp_enq_flags |= enq_flags;

        /*
         * We are in the enqueue path with @rq locked and pinned, and thus can't
         * double lock a remote rq and enqueue to its local DSQ. For
         * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
         * the enqueue so that it's executed when @rq can be unlocked.
         */
        if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
                unsigned long opss;

                opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;

                switch (opss & SCX_OPSS_STATE_MASK) {
                case SCX_OPSS_NONE:
                        break;
                case SCX_OPSS_QUEUEING:
                        /*
                         * As @p was never passed to the BPF side, _release is
                         * not strictly necessary. Still do it for consistency.
                         */
                        atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
                        break;
                default:
                        WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()",
                                  p->comm, p->pid, opss);
                        atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
                        break;
                }

                WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
                list_add_tail(&p->scx.dsq_list.node,
                              &rq->scx.ddsp_deferred_locals);
                schedule_deferred_locked(rq);
                return;
        }

        dispatch_enqueue(sch, dsq, p,
                         p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
}

static bool scx_rq_online(struct rq *rq)
{
        /*
         * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates
         * the online state as seen from the BPF scheduler. cpu_active() test
         * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will
         * stay set until the current scheduling operation is complete even if
         * we aren't locking @rq.
         */
        return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
}

static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
                            int sticky_cpu)
{
        struct scx_sched *sch = scx_root;
        struct task_struct **ddsp_taskp;
        struct scx_dispatch_q *dsq;
        unsigned long qseq;

        WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));

        /* rq migration */
        if (sticky_cpu == cpu_of(rq))
                goto local_norefill;

        /*
         * If !scx_rq_online(), we already told the BPF scheduler that the CPU
         * is offline and are just running the hotplug path. Don't bother the
         * BPF scheduler.
         */
        if (!scx_rq_online(rq))
                goto local;

        if (scx_rq_bypassing(rq)) {
                __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1);
                goto bypass;
        }

        if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
                goto direct;

        /* see %SCX_OPS_ENQ_EXITING */
        if (!(sch->ops.flags & SCX_OPS_ENQ_EXITING) &&
            unlikely(p->flags & PF_EXITING)) {
                __scx_add_event(sch, SCX_EV_ENQ_SKIP_EXITING, 1);
                goto local;
        }

        /* see %SCX_OPS_ENQ_MIGRATION_DISABLED */
        if (!(sch->ops.flags & SCX_OPS_ENQ_MIGRATION_DISABLED) &&
            is_migration_disabled(p)) {
                __scx_add_event(sch, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED, 1);
                goto local;
        }

        if (unlikely(!SCX_HAS_OP(sch, enqueue)))
                goto global;

        /* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
        qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;

        WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
        atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);

        ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
        WARN_ON_ONCE(*ddsp_taskp);
        *ddsp_taskp = p;

        SCX_CALL_OP_TASK(sch, SCX_KF_ENQUEUE, enqueue, rq, p, enq_flags);

        *ddsp_taskp = NULL;
        if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
                goto direct;

        /*
         * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
         * dequeue may be waiting. The store_release matches their load_acquire.
         */
        atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
        return;

direct:
        direct_dispatch(sch, p, enq_flags);
        return;
local_norefill:
        dispatch_enqueue(sch, &rq->scx.local_dsq, p, enq_flags);
        return;
local:
        dsq = &rq->scx.local_dsq;
        goto enqueue;
global:
        dsq = find_global_dsq(sch, p);
        goto enqueue;
bypass:
        dsq = &task_rq(p)->scx.bypass_dsq;
        goto enqueue;

enqueue:
        /*
         * For task-ordering, slice refill must be treated as implying the end
         * of the current slice. Otherwise, the longer @p stays on the CPU, the
         * higher priority it becomes from scx_prio_less()'s POV.
         */
        touch_core_sched(rq, p);
        refill_task_slice_dfl(sch, p);
        dispatch_enqueue(sch, dsq, p, enq_flags);
}

static bool task_runnable(const struct task_struct *p)
{
        return !list_empty(&p->scx.runnable_node);
}

static void set_task_runnable(struct rq *rq, struct task_struct *p)
{
        lockdep_assert_rq_held(rq);

        if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
                p->scx.runnable_at = jiffies;
                p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
        }

        /*
         * list_add_tail() must be used. scx_bypass() depends on tasks being
         * appended to the runnable_list.
         */
        list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
}

static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
{
        list_del_init(&p->scx.runnable_node);
        if (reset_runnable_at)
                p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
}

static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
{
        struct scx_sched *sch = scx_root;
        int sticky_cpu = p->scx.sticky_cpu;

        if (enq_flags & ENQUEUE_WAKEUP)
                rq->scx.flags |= SCX_RQ_IN_WAKEUP;

        enq_flags |= rq->scx.extra_enq_flags;

        if (sticky_cpu >= 0)
                p->scx.sticky_cpu = -1;

        /*
         * Restoring a running task will be immediately followed by
         * set_next_task_scx() which expects the task to not be on the BPF
         * scheduler as tasks can only start running through local DSQs. Force
         * direct-dispatch into the local DSQ by setting the sticky_cpu.
         */
        if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
                sticky_cpu = cpu_of(rq);

        if (p->scx.flags & SCX_TASK_QUEUED) {
                WARN_ON_ONCE(!task_runnable(p));
                goto out;
        }

        set_task_runnable(rq, p);
        p->scx.flags |= SCX_TASK_QUEUED;
        rq->scx.nr_running++;
        add_nr_running(rq, 1);

        if (SCX_HAS_OP(sch, runnable) && !task_on_rq_migrating(p))
                SCX_CALL_OP_TASK(sch, SCX_KF_REST, runnable, rq, p, enq_flags);

        if (enq_flags & SCX_ENQ_WAKEUP)
                touch_core_sched(rq, p);

        do_enqueue_task(rq, p, enq_flags, sticky_cpu);
out:
        rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;

        if ((enq_flags & SCX_ENQ_CPU_SELECTED) &&
            unlikely(cpu_of(rq) != p->scx.selected_cpu))
                __scx_add_event(sch, SCX_EV_SELECT_CPU_FALLBACK, 1);
}

static void ops_dequeue(struct rq *rq, struct task_struct *p, u64 deq_flags)
{
        struct scx_sched *sch = scx_root;
        unsigned long opss;

        /* dequeue is always temporary, don't reset runnable_at */
        clr_task_runnable(p, false);

        /* acquire ensures that we see the preceding updates on QUEUED */
        opss = atomic_long_read_acquire(&p->scx.ops_state);

        switch (opss & SCX_OPSS_STATE_MASK) {
        case SCX_OPSS_NONE:
                break;
        case SCX_OPSS_QUEUEING:
                /*
                 * QUEUEING is started and finished while holding @p's rq lock.
                 * As we're holding the rq lock now, we shouldn't see QUEUEING.
                 */
                BUG();
        case SCX_OPSS_QUEUED:
                if (SCX_HAS_OP(sch, dequeue))
                        SCX_CALL_OP_TASK(sch, SCX_KF_REST, dequeue, rq,
                                         p, deq_flags);

                if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
                                            SCX_OPSS_NONE))
                        break;
                fallthrough;
        case SCX_OPSS_DISPATCHING:
                /*
                 * If @p is being dispatched from the BPF scheduler to a DSQ,
                 * wait for the transfer to complete so that @p doesn't get
                 * added to its DSQ after dequeueing is complete.
                 *
                 * As we're waiting on DISPATCHING with the rq locked, the
                 * dispatching side shouldn't try to lock the rq while
                 * DISPATCHING is set. See dispatch_to_local_dsq().
                 *
                 * DISPATCHING shouldn't have qseq set and control can reach
                 * here with NONE @opss from the above QUEUED case block.
                 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
                 */
                wait_ops_state(p, SCX_OPSS_DISPATCHING);
                BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
                break;
        }
}
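/*
 * Illustrative summary (not part of the build) of the ops_state transitions
 * coordinated by ops_dequeue() above together with do_enqueue_task() and
 * dispatch_enqueue():
 *
 *      NONE --enqueue--> QUEUEING|qseq --ops.enqueue() returns--> QUEUED|qseq
 *      QUEUED|qseq --BPF dispatch kfunc--> DISPATCHING --transfer done--> NONE
 *
 * A dequeue racing with DISPATCHING busy-waits in wait_ops_state() so that
 * the task can't be added to a DSQ after the dequeue has completed.
 */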
static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags)
{
        struct scx_sched *sch = scx_root;

        if (!(p->scx.flags & SCX_TASK_QUEUED)) {
                WARN_ON_ONCE(task_runnable(p));
                return true;
        }

        ops_dequeue(rq, p, deq_flags);

        /*
         * A currently running task which is going off @rq first gets dequeued
         * and then stops running. As we want running <-> stopping transitions
         * to be contained within runnable <-> quiescent transitions, trigger
         * ->stopping() early here instead of in put_prev_task_scx().
         *
         * @p may go through multiple stopping <-> running transitions between
         * here and put_prev_task_scx() if task attribute changes occur while
         * balance_scx() leaves @rq unlocked. However, they don't contain any
         * information meaningful to the BPF scheduler and can be suppressed by
         * skipping the callbacks if the task is !QUEUED.
         */
        if (SCX_HAS_OP(sch, stopping) && task_current(rq, p)) {
                update_curr_scx(rq);
                SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, false);
        }

        if (SCX_HAS_OP(sch, quiescent) && !task_on_rq_migrating(p))
                SCX_CALL_OP_TASK(sch, SCX_KF_REST, quiescent, rq, p, deq_flags);

        if (deq_flags & SCX_DEQ_SLEEP)
                p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
        else
                p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;

        p->scx.flags &= ~SCX_TASK_QUEUED;
        rq->scx.nr_running--;
        sub_nr_running(rq, 1);

        dispatch_dequeue(rq, p);
        return true;
}

static void yield_task_scx(struct rq *rq)
{
        struct scx_sched *sch = scx_root;
        struct task_struct *p = rq->donor;

        if (SCX_HAS_OP(sch, yield))
                SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq, p, NULL);
        else
                p->scx.slice = 0;
}

static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
{
        struct scx_sched *sch = scx_root;
        struct task_struct *from = rq->donor;

        if (SCX_HAS_OP(sch, yield))
                return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq,
                                              from, to);
        else
                return false;
}

static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
                                         struct scx_dispatch_q *src_dsq,
                                         struct rq *dst_rq)
{
        struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;

        /* @src_dsq is locked and @p is on @dst_rq */
        lockdep_assert_held(&src_dsq->lock);
        lockdep_assert_rq_held(dst_rq);

        WARN_ON_ONCE(p->scx.holding_cpu >= 0);

        if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
                list_add(&p->scx.dsq_list.node, &dst_dsq->list);
        else
                list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);

        dsq_mod_nr(dst_dsq, 1);
        p->scx.dsq = dst_dsq;
}

/**
 * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
 * @p: task to move
 * @enq_flags: %SCX_ENQ_*
 * @src_rq: rq to move the task from, locked on entry, released on return
 * @dst_rq: rq to move the task into, locked on return
 *
 * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
 */
static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
                                          struct rq *src_rq, struct rq *dst_rq)
{
        lockdep_assert_rq_held(src_rq);

        /* the following marks @p MIGRATING which excludes dequeue */
        deactivate_task(src_rq, p, 0);
        set_task_cpu(p, cpu_of(dst_rq));
        p->scx.sticky_cpu = cpu_of(dst_rq);

        raw_spin_rq_unlock(src_rq);
        raw_spin_rq_lock(dst_rq);

        /*
         * We want to pass scx-specific enq_flags but activate_task() will
         * truncate the upper 32 bits. As we own @rq, we can pass them through
         * @rq->scx.extra_enq_flags instead.
         */
        WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
        WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
        dst_rq->scx.extra_enq_flags = enq_flags;
        activate_task(dst_rq, p, 0);
        dst_rq->scx.extra_enq_flags = 0;
}
/*
 * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
 * differences:
 *
 * - is_cpu_allowed() asks "Can this task run on this CPU?" while
 *   task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
 *   this CPU?".
 *
 *   While migration is disabled, is_cpu_allowed() has to say "yes" as the task
 *   must be allowed to finish on the CPU that it's currently on regardless of
 *   the CPU state. However, task_can_run_on_remote_rq() must say "no" as the
 *   BPF scheduler shouldn't attempt to migrate a task which has migration
 *   disabled.
 *
 * - The BPF scheduler is bypassed while the rq is offline and we can always say
 *   no to the BPF scheduler initiated migrations while offline.
 *
 * The caller must ensure that @p and @rq are on different CPUs.
 */
static bool task_can_run_on_remote_rq(struct scx_sched *sch,
                                      struct task_struct *p, struct rq *rq,
                                      bool enforce)
{
        int cpu = cpu_of(rq);

        WARN_ON_ONCE(task_cpu(p) == cpu);

        /*
         * If @p has migration disabled, @p->cpus_ptr is updated to contain only
         * the pinned CPU in migrate_disable_switch() while @p is being switched
         * out. However, put_prev_task_scx() is called before @p->cpus_ptr is
         * updated and thus another CPU may see @p on a DSQ in between, leading
         * to @p passing the below task_allowed_on_cpu() check while migration
         * is disabled.
         *
         * Test the migration disabled state first as the race window is narrow
         * and the BPF scheduler failing to check the migration disabled state
         * can easily be masked if task_allowed_on_cpu() is done first.
         */
        if (unlikely(is_migration_disabled(p))) {
                if (enforce)
                        scx_error(sch, "SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d",
                                  p->comm, p->pid, task_cpu(p), cpu);
                return false;
        }

        /*
         * We don't require the BPF scheduler to avoid dispatching to offline
         * CPUs mostly for convenience but also because CPUs can go offline
         * between scx_bpf_dsq_insert() calls and here. Trigger an error iff the
         * picked CPU is outside the allowed mask.
         */
        if (!task_allowed_on_cpu(p, cpu)) {
                if (enforce)
                        scx_error(sch, "SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]",
                                  cpu, p->comm, p->pid);
                return false;
        }

        if (!scx_rq_online(rq)) {
                if (enforce)
                        __scx_add_event(sch, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE, 1);
                return false;
        }

        return true;
}
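/*
 * Illustrative sketch (not part of the build) of the holding_cpu handshake
 * documented in unlink_dsq_and_lock_src_rq() below. The moving side:
 *
 *      p->scx.holding_cpu = raw_smp_processor_id();
 *      task_unlink_from_dsq(p, dsq);
 *      raw_spin_unlock(&dsq->lock);
 *      raw_spin_rq_lock(src_rq);
 *      if (p->scx.holding_cpu != raw_smp_processor_id())
 *              // lost the race: dequeue cleared holding_cpu to -1
 *
 * The dequeue side clears @p->scx.holding_cpu to -1 under @dsq->lock (see
 * dispatch_dequeue()), which is how the two sides agree on who owns @p.
 */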
While other cpus may have updated it to different 1753 * values afterwards, as this operation can't be preempted or recurse, the 1754 * holding_cpu can never become this CPU again before we're done. Thus, we can 1755 * tell whether we lost to dequeue by testing whether the holding_cpu still 1756 * points to this CPU. See dispatch_dequeue() for the counterpart. 1757 * 1758 * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is 1759 * still valid. %false if lost to dequeue. 1760 */ 1761 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p, 1762 struct scx_dispatch_q *dsq, 1763 struct rq *src_rq) 1764 { 1765 s32 cpu = raw_smp_processor_id(); 1766 1767 lockdep_assert_held(&dsq->lock); 1768 1769 WARN_ON_ONCE(p->scx.holding_cpu >= 0); 1770 task_unlink_from_dsq(p, dsq); 1771 p->scx.holding_cpu = cpu; 1772 1773 raw_spin_unlock(&dsq->lock); 1774 raw_spin_rq_lock(src_rq); 1775 1776 /* task_rq couldn't have changed if we're still the holding cpu */ 1777 return likely(p->scx.holding_cpu == cpu) && 1778 !WARN_ON_ONCE(src_rq != task_rq(p)); 1779 } 1780 1781 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p, 1782 struct scx_dispatch_q *dsq, struct rq *src_rq) 1783 { 1784 raw_spin_rq_unlock(this_rq); 1785 1786 if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) { 1787 move_remote_task_to_local_dsq(p, 0, src_rq, this_rq); 1788 return true; 1789 } else { 1790 raw_spin_rq_unlock(src_rq); 1791 raw_spin_rq_lock(this_rq); 1792 return false; 1793 } 1794 } 1795 1796 /** 1797 * move_task_between_dsqs() - Move a task from one DSQ to another 1798 * @sch: scx_sched being operated on 1799 * @p: target task 1800 * @enq_flags: %SCX_ENQ_* 1801 * @src_dsq: DSQ @p is currently on, must not be a local DSQ 1802 * @dst_dsq: DSQ @p is being moved to, can be any DSQ 1803 * 1804 * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local 1805 * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq 1806 * will change. As @p's task_rq is locked, this function doesn't need to use the 1807 * holding_cpu mechanism. 1808 * 1809 * On return, @src_dsq is unlocked and only @p's new task_rq, which is the 1810 * return value, is locked. 1811 */ 1812 static struct rq *move_task_between_dsqs(struct scx_sched *sch, 1813 struct task_struct *p, u64 enq_flags, 1814 struct scx_dispatch_q *src_dsq, 1815 struct scx_dispatch_q *dst_dsq) 1816 { 1817 struct rq *src_rq = task_rq(p), *dst_rq; 1818 1819 BUG_ON(src_dsq->id == SCX_DSQ_LOCAL); 1820 lockdep_assert_held(&src_dsq->lock); 1821 lockdep_assert_rq_held(src_rq); 1822 1823 if (dst_dsq->id == SCX_DSQ_LOCAL) { 1824 dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq); 1825 if (src_rq != dst_rq && 1826 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) { 1827 dst_dsq = find_global_dsq(sch, p); 1828 dst_rq = src_rq; 1829 } 1830 } else { 1831 /* no need to migrate if destination is a non-local DSQ */ 1832 dst_rq = src_rq; 1833 } 1834 1835 /* 1836 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different 1837 * CPU, @p will be migrated. 
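	 *
	 * In lock terms, the cases handled below are (reference sketch):
	 *
	 *	same-rq local:	unlink from $src_dsq, splice into the local DSQ,
	 *			unlock $src_dsq
	 *	remote local:	unlock $src_dsq, deactivate/activate across the
	 *			src_rq -> dst_rq lock switch
	 *	non-local:	abbreviated dequeue, unlock $src_dsq, regular
	 *			enqueue into $dst_dsq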
1838 */ 1839 if (dst_dsq->id == SCX_DSQ_LOCAL) { 1840 /* @p is going from a non-local DSQ to a local DSQ */ 1841 if (src_rq == dst_rq) { 1842 task_unlink_from_dsq(p, src_dsq); 1843 move_local_task_to_local_dsq(p, enq_flags, 1844 src_dsq, dst_rq); 1845 raw_spin_unlock(&src_dsq->lock); 1846 } else { 1847 raw_spin_unlock(&src_dsq->lock); 1848 move_remote_task_to_local_dsq(p, enq_flags, 1849 src_rq, dst_rq); 1850 } 1851 } else { 1852 /* 1853 * @p is going from a non-local DSQ to a non-local DSQ. As 1854 * $src_dsq is already locked, do an abbreviated dequeue. 1855 */ 1856 dispatch_dequeue_locked(p, src_dsq); 1857 raw_spin_unlock(&src_dsq->lock); 1858 1859 dispatch_enqueue(sch, dst_dsq, p, enq_flags); 1860 } 1861 1862 return dst_rq; 1863 } 1864 1865 static bool consume_dispatch_q(struct scx_sched *sch, struct rq *rq, 1866 struct scx_dispatch_q *dsq) 1867 { 1868 struct task_struct *p; 1869 retry: 1870 /* 1871 * The caller can't expect to successfully consume a task if the task's 1872 * addition to @dsq isn't guaranteed to be visible somehow. Test 1873 * @dsq->list without locking and skip if it seems empty. 1874 */ 1875 if (list_empty(&dsq->list)) 1876 return false; 1877 1878 raw_spin_lock(&dsq->lock); 1879 1880 nldsq_for_each_task(p, dsq) { 1881 struct rq *task_rq = task_rq(p); 1882 1883 /* 1884 * This loop can lead to multiple lockup scenarios, e.g. the BPF 1885 * scheduler can put an enormous number of affinitized tasks into 1886 * a contended DSQ, or the outer retry loop can repeatedly race 1887 * against scx_bypass() dequeueing tasks from @dsq trying to put 1888 * the system into the bypass mode. This can easily live-lock the 1889 * machine. If aborting, exit from all non-bypass DSQs. 1890 */ 1891 if (unlikely(READ_ONCE(scx_aborting)) && dsq->id != SCX_DSQ_BYPASS) 1892 break; 1893 1894 if (rq == task_rq) { 1895 task_unlink_from_dsq(p, dsq); 1896 move_local_task_to_local_dsq(p, 0, dsq, rq); 1897 raw_spin_unlock(&dsq->lock); 1898 return true; 1899 } 1900 1901 if (task_can_run_on_remote_rq(sch, p, rq, false)) { 1902 if (likely(consume_remote_task(rq, p, dsq, task_rq))) 1903 return true; 1904 goto retry; 1905 } 1906 } 1907 1908 raw_spin_unlock(&dsq->lock); 1909 return false; 1910 } 1911 1912 static bool consume_global_dsq(struct scx_sched *sch, struct rq *rq) 1913 { 1914 int node = cpu_to_node(cpu_of(rq)); 1915 1916 return consume_dispatch_q(sch, rq, sch->global_dsqs[node]); 1917 } 1918 1919 /** 1920 * dispatch_to_local_dsq - Dispatch a task to a local dsq 1921 * @sch: scx_sched being operated on 1922 * @rq: current rq which is locked 1923 * @dst_dsq: destination DSQ 1924 * @p: task to dispatch 1925 * @enq_flags: %SCX_ENQ_* 1926 * 1927 * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local 1928 * DSQ. This function performs all the synchronization dancing needed because 1929 * local DSQs are protected with rq locks. 1930 * 1931 * The caller must have exclusive ownership of @p (e.g. through 1932 * %SCX_OPSS_DISPATCHING). 1933 */ 1934 static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq, 1935 struct scx_dispatch_q *dst_dsq, 1936 struct task_struct *p, u64 enq_flags) 1937 { 1938 struct rq *src_rq = task_rq(p); 1939 struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq); 1940 struct rq *locked_rq = rq; 1941 1942 /* 1943 * We're synchronized against dequeue through DISPATCHING. As @p can't 1944 * be dequeued, its task_rq and cpus_allowed are stable too. 1945 * 1946 * If dispatching to @rq that @p is already on, no lock dancing needed. 
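	 *
	 * For a remote target, the handoff below proceeds, in order (sketch):
	 *
	 *	1. record this CPU in @p->scx.holding_cpu
	 *	2. release DISPATCHING with a store_release of @p->scx.ops_state
	 *	3. switch from the current rq lock to @src_rq's
	 *	4. if holding_cpu is still this CPU, @p is ours and is moved;
	 *	   otherwise, a racing dequeue won and we back off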
1947 	 */
1948 	if (rq == src_rq && rq == dst_rq) {
1949 		dispatch_enqueue(sch, dst_dsq, p,
1950 				 enq_flags | SCX_ENQ_CLEAR_OPSS);
1951 		return;
1952 	}
1953 
1954 	if (src_rq != dst_rq &&
1955 	    unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
1956 		dispatch_enqueue(sch, find_global_dsq(sch, p), p,
1957 				 enq_flags | SCX_ENQ_CLEAR_OPSS);
1958 		return;
1959 	}
1960 
1961 	/*
1962 	 * @p is on a possibly remote @src_rq which we need to lock to move the
1963 	 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
1964 	 * on DISPATCHING, so we can't grab @src_rq lock while holding
1965 	 * DISPATCHING.
1966 	 *
1967 	 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
1968 	 * we're moving from a DSQ and use the same mechanism - mark the task
1969 	 * under transfer with holding_cpu, release DISPATCHING and then follow
1970 	 * the same protocol. See unlink_dsq_and_lock_src_rq().
1971 	 */
1972 	p->scx.holding_cpu = raw_smp_processor_id();
1973 
1974 	/* store_release ensures that dequeue sees the above */
1975 	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
1976 
1977 	/* switch to @src_rq lock */
1978 	if (locked_rq != src_rq) {
1979 		raw_spin_rq_unlock(locked_rq);
1980 		locked_rq = src_rq;
1981 		raw_spin_rq_lock(src_rq);
1982 	}
1983 
1984 	/* task_rq couldn't have changed if we're still the holding cpu */
1985 	if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
1986 	    !WARN_ON_ONCE(src_rq != task_rq(p))) {
1987 		/*
1988 		 * If @p is staying on the same rq, there's no need to go
1989 		 * through the full deactivate/activate cycle. Optimize by
1990 		 * abbreviating move_remote_task_to_local_dsq().
1991 		 */
1992 		if (src_rq == dst_rq) {
1993 			p->scx.holding_cpu = -1;
1994 			dispatch_enqueue(sch, &dst_rq->scx.local_dsq, p,
1995 					 enq_flags);
1996 		} else {
1997 			move_remote_task_to_local_dsq(p, enq_flags,
1998 						      src_rq, dst_rq);
1999 			/* task has been moved to dst_rq, which is now locked */
2000 			locked_rq = dst_rq;
2001 		}
2002 
2003 		/* resched @dst_rq if @p preempts its current task (e.g. it's idle) */
2004 		if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
2005 			resched_curr(dst_rq);
2006 	}
2007 
2008 	/* switch back to @rq lock */
2009 	if (locked_rq != rq) {
2010 		raw_spin_rq_unlock(locked_rq);
2011 		raw_spin_rq_lock(rq);
2012 	}
2013 }
2014 
2015 /**
2016  * finish_dispatch - Asynchronously finish dispatching a task
2017  * @rq: current rq which is locked
2018  * @p: task to finish dispatching
2019  * @qseq_at_dispatch: qseq when @p started getting dispatched
2020  * @dsq_id: destination DSQ ID
2021  * @enq_flags: %SCX_ENQ_*
2022  *
2023  * Dispatching to local DSQs may need to wait for queueing to complete or
2024  * require rq lock dancing. As we don't want to do either while inside
2025  * ops.dispatch() to avoid locking order inversion, we split dispatching into
2026  * two parts. scx_bpf_dsq_insert() which is called by ops.dispatch() records the
2027  * task and its qseq. Once ops.dispatch() returns, this function is called to
2028  * finish up.
2029  *
2030  * There is no guarantee that @p is still valid for dispatching or even that it
2031  * was valid in the first place. Make sure that the task is still owned by the
2032  * BPF scheduler and claim the ownership before dispatching.
2033  */
2034 static void finish_dispatch(struct scx_sched *sch, struct rq *rq,
2035 			    struct task_struct *p,
2036 			    unsigned long qseq_at_dispatch,
2037 			    u64 dsq_id, u64 enq_flags)
2038 {
2039 	struct scx_dispatch_q *dsq;
2040 	unsigned long opss;
2041 
2042 	touch_core_sched_dispatch(rq, p);
2043 retry:
2044 	/*
2045 	 * No need for _acquire here.
@p is accessed only after a successful 2046 * try_cmpxchg to DISPATCHING. 2047 */ 2048 opss = atomic_long_read(&p->scx.ops_state); 2049 2050 switch (opss & SCX_OPSS_STATE_MASK) { 2051 case SCX_OPSS_DISPATCHING: 2052 case SCX_OPSS_NONE: 2053 /* someone else already got to it */ 2054 return; 2055 case SCX_OPSS_QUEUED: 2056 /* 2057 * If qseq doesn't match, @p has gone through at least one 2058 * dispatch/dequeue and re-enqueue cycle between 2059 * scx_bpf_dsq_insert() and here and we have no claim on it. 2060 */ 2061 if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch) 2062 return; 2063 2064 /* 2065 * While we know @p is accessible, we don't yet have a claim on 2066 * it - the BPF scheduler is allowed to dispatch tasks 2067 * spuriously and there can be a racing dequeue attempt. Let's 2068 * claim @p by atomically transitioning it from QUEUED to 2069 * DISPATCHING. 2070 */ 2071 if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss, 2072 SCX_OPSS_DISPATCHING))) 2073 break; 2074 goto retry; 2075 case SCX_OPSS_QUEUEING: 2076 /* 2077 * do_enqueue_task() is in the process of transferring the task 2078 * to the BPF scheduler while holding @p's rq lock. As we aren't 2079 * holding any kernel or BPF resource that the enqueue path may 2080 * depend upon, it's safe to wait. 2081 */ 2082 wait_ops_state(p, opss); 2083 goto retry; 2084 } 2085 2086 BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED)); 2087 2088 dsq = find_dsq_for_dispatch(sch, this_rq(), dsq_id, p); 2089 2090 if (dsq->id == SCX_DSQ_LOCAL) 2091 dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags); 2092 else 2093 dispatch_enqueue(sch, dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS); 2094 } 2095 2096 static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq) 2097 { 2098 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 2099 u32 u; 2100 2101 for (u = 0; u < dspc->cursor; u++) { 2102 struct scx_dsp_buf_ent *ent = &dspc->buf[u]; 2103 2104 finish_dispatch(sch, rq, ent->task, ent->qseq, ent->dsq_id, 2105 ent->enq_flags); 2106 } 2107 2108 dspc->nr_tasks += dspc->cursor; 2109 dspc->cursor = 0; 2110 } 2111 2112 static inline void maybe_queue_balance_callback(struct rq *rq) 2113 { 2114 lockdep_assert_rq_held(rq); 2115 2116 if (!(rq->scx.flags & SCX_RQ_BAL_CB_PENDING)) 2117 return; 2118 2119 queue_balance_callback(rq, &rq->scx.deferred_bal_cb, 2120 deferred_bal_cb_workfn); 2121 2122 rq->scx.flags &= ~SCX_RQ_BAL_CB_PENDING; 2123 } 2124 2125 static int balance_one(struct rq *rq, struct task_struct *prev) 2126 { 2127 struct scx_sched *sch = scx_root; 2128 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 2129 bool prev_on_scx = prev->sched_class == &ext_sched_class; 2130 bool prev_on_rq = prev->scx.flags & SCX_TASK_QUEUED; 2131 int nr_loops = SCX_DSP_MAX_LOOPS; 2132 2133 lockdep_assert_rq_held(rq); 2134 rq->scx.flags |= SCX_RQ_IN_BALANCE; 2135 rq->scx.flags &= ~SCX_RQ_BAL_KEEP; 2136 2137 if ((sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT) && 2138 unlikely(rq->scx.cpu_released)) { 2139 /* 2140 * If the previous sched_class for the current CPU was not SCX, 2141 * notify the BPF scheduler that it again has control of the 2142 * core. This callback complements ->cpu_release(), which is 2143 * emitted in switch_class(). 
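	 *
	 * An illustrative BPF-side counterpart might look like the following
	 * (sketch only; the callback and kfunc names below are from the
	 * sched_ext BPF API, the implementation is hypothetical):
	 *
	 *	void BPF_STRUCT_OPS(sched_cpu_acquire, s32 cpu,
	 *			    struct scx_cpu_acquire_args *args)
	 *	{
	 *		// The CPU is ours again - kick it so it starts
	 *		// dispatching from the BPF scheduler right away.
	 *		scx_bpf_kick_cpu(cpu, 0);
	 *	}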
2144 */ 2145 if (SCX_HAS_OP(sch, cpu_acquire)) 2146 SCX_CALL_OP(sch, SCX_KF_REST, cpu_acquire, rq, 2147 cpu_of(rq), NULL); 2148 rq->scx.cpu_released = false; 2149 } 2150 2151 if (prev_on_scx) { 2152 update_curr_scx(rq); 2153 2154 /* 2155 * If @prev is runnable & has slice left, it has priority and 2156 * fetching more just increases latency for the fetched tasks. 2157 * Tell pick_task_scx() to keep running @prev. If the BPF 2158 * scheduler wants to handle this explicitly, it should 2159 * implement ->cpu_release(). 2160 * 2161 * See scx_disable_workfn() for the explanation on the bypassing 2162 * test. 2163 */ 2164 if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) { 2165 rq->scx.flags |= SCX_RQ_BAL_KEEP; 2166 goto has_tasks; 2167 } 2168 } 2169 2170 /* if there already are tasks to run, nothing to do */ 2171 if (rq->scx.local_dsq.nr) 2172 goto has_tasks; 2173 2174 if (consume_global_dsq(sch, rq)) 2175 goto has_tasks; 2176 2177 if (scx_rq_bypassing(rq)) { 2178 if (consume_dispatch_q(sch, rq, &rq->scx.bypass_dsq)) 2179 goto has_tasks; 2180 else 2181 goto no_tasks; 2182 } 2183 2184 if (unlikely(!SCX_HAS_OP(sch, dispatch)) || !scx_rq_online(rq)) 2185 goto no_tasks; 2186 2187 dspc->rq = rq; 2188 2189 /* 2190 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock, 2191 * the local DSQ might still end up empty after a successful 2192 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch() 2193 * produced some tasks, retry. The BPF scheduler may depend on this 2194 * looping behavior to simplify its implementation. 2195 */ 2196 do { 2197 dspc->nr_tasks = 0; 2198 2199 SCX_CALL_OP(sch, SCX_KF_DISPATCH, dispatch, rq, 2200 cpu_of(rq), prev_on_scx ? prev : NULL); 2201 2202 flush_dispatch_buf(sch, rq); 2203 2204 if (prev_on_rq && prev->scx.slice) { 2205 rq->scx.flags |= SCX_RQ_BAL_KEEP; 2206 goto has_tasks; 2207 } 2208 if (rq->scx.local_dsq.nr) 2209 goto has_tasks; 2210 if (consume_global_dsq(sch, rq)) 2211 goto has_tasks; 2212 2213 /* 2214 * ops.dispatch() can trap us in this loop by repeatedly 2215 * dispatching ineligible tasks. Break out once in a while to 2216 * allow the watchdog to run. As IRQ can't be enabled in 2217 * balance(), we want to complete this scheduling cycle and then 2218 * start a new one. IOW, we want to call resched_curr() on the 2219 * next, most likely idle, task, not the current one. Use 2220 * scx_kick_cpu() for deferred kicking. 2221 */ 2222 if (unlikely(!--nr_loops)) { 2223 scx_kick_cpu(sch, cpu_of(rq), 0); 2224 break; 2225 } 2226 } while (dspc->nr_tasks); 2227 2228 no_tasks: 2229 /* 2230 * Didn't find another task to run. Keep running @prev unless 2231 * %SCX_OPS_ENQ_LAST is in effect. 2232 */ 2233 if (prev_on_rq && 2234 (!(sch->ops.flags & SCX_OPS_ENQ_LAST) || scx_rq_bypassing(rq))) { 2235 rq->scx.flags |= SCX_RQ_BAL_KEEP; 2236 __scx_add_event(sch, SCX_EV_DISPATCH_KEEP_LAST, 1); 2237 goto has_tasks; 2238 } 2239 rq->scx.flags &= ~SCX_RQ_IN_BALANCE; 2240 return false; 2241 2242 has_tasks: 2243 rq->scx.flags &= ~SCX_RQ_IN_BALANCE; 2244 return true; 2245 } 2246 2247 static void process_ddsp_deferred_locals(struct rq *rq) 2248 { 2249 struct task_struct *p; 2250 2251 lockdep_assert_rq_held(rq); 2252 2253 /* 2254 * Now that @rq can be unlocked, execute the deferred enqueueing of 2255 * tasks directly dispatched to the local DSQs of other CPUs. See 2256 * direct_dispatch(). Keep popping from the head instead of using 2257 * list_for_each_entry_safe() as dispatch_local_dsq() may unlock @rq 2258 * temporarily. 
2259 */ 2260 while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals, 2261 struct task_struct, scx.dsq_list.node))) { 2262 struct scx_sched *sch = scx_root; 2263 struct scx_dispatch_q *dsq; 2264 2265 list_del_init(&p->scx.dsq_list.node); 2266 2267 dsq = find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p); 2268 if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL)) 2269 dispatch_to_local_dsq(sch, rq, dsq, p, 2270 p->scx.ddsp_enq_flags); 2271 } 2272 } 2273 2274 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first) 2275 { 2276 struct scx_sched *sch = scx_root; 2277 2278 if (p->scx.flags & SCX_TASK_QUEUED) { 2279 /* 2280 * Core-sched might decide to execute @p before it is 2281 * dispatched. Call ops_dequeue() to notify the BPF scheduler. 2282 */ 2283 ops_dequeue(rq, p, SCX_DEQ_CORE_SCHED_EXEC); 2284 dispatch_dequeue(rq, p); 2285 } 2286 2287 p->se.exec_start = rq_clock_task(rq); 2288 2289 /* see dequeue_task_scx() on why we skip when !QUEUED */ 2290 if (SCX_HAS_OP(sch, running) && (p->scx.flags & SCX_TASK_QUEUED)) 2291 SCX_CALL_OP_TASK(sch, SCX_KF_REST, running, rq, p); 2292 2293 clr_task_runnable(p, true); 2294 2295 /* 2296 * @p is getting newly scheduled or got kicked after someone updated its 2297 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick(). 2298 */ 2299 if ((p->scx.slice == SCX_SLICE_INF) != 2300 (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) { 2301 if (p->scx.slice == SCX_SLICE_INF) 2302 rq->scx.flags |= SCX_RQ_CAN_STOP_TICK; 2303 else 2304 rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK; 2305 2306 sched_update_tick_dependency(rq); 2307 2308 /* 2309 * For now, let's refresh the load_avgs just when transitioning 2310 * in and out of nohz. In the future, we might want to add a 2311 * mechanism which calls the following periodically on 2312 * tick-stopped CPUs. 2313 */ 2314 update_other_load_avgs(rq); 2315 } 2316 } 2317 2318 static enum scx_cpu_preempt_reason 2319 preempt_reason_from_class(const struct sched_class *class) 2320 { 2321 if (class == &stop_sched_class) 2322 return SCX_CPU_PREEMPT_STOP; 2323 if (class == &dl_sched_class) 2324 return SCX_CPU_PREEMPT_DL; 2325 if (class == &rt_sched_class) 2326 return SCX_CPU_PREEMPT_RT; 2327 return SCX_CPU_PREEMPT_UNKNOWN; 2328 } 2329 2330 static void switch_class(struct rq *rq, struct task_struct *next) 2331 { 2332 struct scx_sched *sch = scx_root; 2333 const struct sched_class *next_class = next->sched_class; 2334 2335 if (!(sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT)) 2336 return; 2337 2338 /* 2339 * The callback is conceptually meant to convey that the CPU is no 2340 * longer under the control of SCX. Therefore, don't invoke the callback 2341 * if the next class is below SCX (in which case the BPF scheduler has 2342 * actively decided not to schedule any tasks on the CPU). 2343 */ 2344 if (sched_class_above(&ext_sched_class, next_class)) 2345 return; 2346 2347 /* 2348 * At this point we know that SCX was preempted by a higher priority 2349 * sched_class, so invoke the ->cpu_release() callback if we have not 2350 * done so already. We only send the callback once between SCX being 2351 * preempted, and it regaining control of the CPU. 2352 * 2353 * ->cpu_release() complements ->cpu_acquire(), which is emitted the 2354 * next time that balance_scx() is invoked. 
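	 *
	 * A typical BPF-side ops.cpu_release() re-enqueues the tasks still
	 * sitting on this CPU's local DSQ so they can run elsewhere, e.g.
	 * (illustrative sketch, not part of this file):
	 *
	 *	void BPF_STRUCT_OPS(sched_cpu_release, s32 cpu,
	 *			    struct scx_cpu_release_args *args)
	 *	{
	 *		// A higher priority class took the CPU. Let the tasks
	 *		// queued on it be picked up by other CPUs.
	 *		scx_bpf_reenqueue_local();
	 *	}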
2355 */ 2356 if (!rq->scx.cpu_released) { 2357 if (SCX_HAS_OP(sch, cpu_release)) { 2358 struct scx_cpu_release_args args = { 2359 .reason = preempt_reason_from_class(next_class), 2360 .task = next, 2361 }; 2362 2363 SCX_CALL_OP(sch, SCX_KF_CPU_RELEASE, cpu_release, rq, 2364 cpu_of(rq), &args); 2365 } 2366 rq->scx.cpu_released = true; 2367 } 2368 } 2369 2370 static void put_prev_task_scx(struct rq *rq, struct task_struct *p, 2371 struct task_struct *next) 2372 { 2373 struct scx_sched *sch = scx_root; 2374 2375 /* see kick_cpus_irq_workfn() */ 2376 smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1); 2377 2378 update_curr_scx(rq); 2379 2380 /* see dequeue_task_scx() on why we skip when !QUEUED */ 2381 if (SCX_HAS_OP(sch, stopping) && (p->scx.flags & SCX_TASK_QUEUED)) 2382 SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, true); 2383 2384 if (p->scx.flags & SCX_TASK_QUEUED) { 2385 set_task_runnable(rq, p); 2386 2387 /* 2388 * If @p has slice left and is being put, @p is getting 2389 * preempted by a higher priority scheduler class or core-sched 2390 * forcing a different task. Leave it at the head of the local 2391 * DSQ. 2392 */ 2393 if (p->scx.slice && !scx_rq_bypassing(rq)) { 2394 dispatch_enqueue(sch, &rq->scx.local_dsq, p, 2395 SCX_ENQ_HEAD); 2396 goto switch_class; 2397 } 2398 2399 /* 2400 * If @p is runnable but we're about to enter a lower 2401 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell 2402 * ops.enqueue() that @p is the only one available for this cpu, 2403 * which should trigger an explicit follow-up scheduling event. 2404 */ 2405 if (sched_class_above(&ext_sched_class, next->sched_class)) { 2406 WARN_ON_ONCE(!(sch->ops.flags & SCX_OPS_ENQ_LAST)); 2407 do_enqueue_task(rq, p, SCX_ENQ_LAST, -1); 2408 } else { 2409 do_enqueue_task(rq, p, 0, -1); 2410 } 2411 } 2412 2413 switch_class: 2414 if (next && next->sched_class != &ext_sched_class) 2415 switch_class(rq, next); 2416 } 2417 2418 static struct task_struct *first_local_task(struct rq *rq) 2419 { 2420 return list_first_entry_or_null(&rq->scx.local_dsq.list, 2421 struct task_struct, scx.dsq_list.node); 2422 } 2423 2424 static struct task_struct * 2425 do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx) 2426 { 2427 struct task_struct *prev = rq->curr; 2428 bool keep_prev, kick_idle = false; 2429 struct task_struct *p; 2430 2431 /* see kick_cpus_irq_workfn() */ 2432 smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1); 2433 2434 rq->next_class = &ext_sched_class; 2435 2436 rq_unpin_lock(rq, rf); 2437 balance_one(rq, prev); 2438 rq_repin_lock(rq, rf); 2439 maybe_queue_balance_callback(rq); 2440 2441 /* 2442 * If any higher-priority sched class enqueued a runnable task on 2443 * this rq during balance_one(), abort and return RETRY_TASK, so 2444 * that the scheduler loop can restart. 2445 * 2446 * If @force_scx is true, always try to pick a SCHED_EXT task, 2447 * regardless of any higher-priority sched classes activity. 2448 */ 2449 if (!force_scx && sched_class_above(rq->next_class, &ext_sched_class)) 2450 return RETRY_TASK; 2451 2452 keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP; 2453 if (unlikely(keep_prev && 2454 prev->sched_class != &ext_sched_class)) { 2455 WARN_ON_ONCE(scx_enable_state() == SCX_ENABLED); 2456 keep_prev = false; 2457 } 2458 2459 /* 2460 * If balance_scx() is telling us to keep running @prev, replenish slice 2461 * if necessary and keep running @prev. Otherwise, pop the first one 2462 * from the local DSQ. 
2463 */ 2464 if (keep_prev) { 2465 p = prev; 2466 if (!p->scx.slice) 2467 refill_task_slice_dfl(rcu_dereference_sched(scx_root), p); 2468 } else { 2469 p = first_local_task(rq); 2470 if (!p) { 2471 if (kick_idle) 2472 scx_kick_cpu(rcu_dereference_sched(scx_root), 2473 cpu_of(rq), SCX_KICK_IDLE); 2474 return NULL; 2475 } 2476 2477 if (unlikely(!p->scx.slice)) { 2478 struct scx_sched *sch = rcu_dereference_sched(scx_root); 2479 2480 if (!scx_rq_bypassing(rq) && !sch->warned_zero_slice) { 2481 printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n", 2482 p->comm, p->pid, __func__); 2483 sch->warned_zero_slice = true; 2484 } 2485 refill_task_slice_dfl(sch, p); 2486 } 2487 } 2488 2489 return p; 2490 } 2491 2492 static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf) 2493 { 2494 return do_pick_task_scx(rq, rf, false); 2495 } 2496 2497 #ifdef CONFIG_SCHED_CORE 2498 /** 2499 * scx_prio_less - Task ordering for core-sched 2500 * @a: task A 2501 * @b: task B 2502 * @in_fi: in forced idle state 2503 * 2504 * Core-sched is implemented as an additional scheduling layer on top of the 2505 * usual sched_class'es and needs to find out the expected task ordering. For 2506 * SCX, core-sched calls this function to interrogate the task ordering. 2507 * 2508 * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used 2509 * to implement the default task ordering. The older the timestamp, the higher 2510 * priority the task - the global FIFO ordering matching the default scheduling 2511 * behavior. 2512 * 2513 * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to 2514 * implement FIFO ordering within each local DSQ. See pick_task_scx(). 2515 */ 2516 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b, 2517 bool in_fi) 2518 { 2519 struct scx_sched *sch = scx_root; 2520 2521 /* 2522 * The const qualifiers are dropped from task_struct pointers when 2523 * calling ops.core_sched_before(). Accesses are controlled by the 2524 * verifier. 2525 */ 2526 if (SCX_HAS_OP(sch, core_sched_before) && 2527 !scx_rq_bypassing(task_rq(a))) 2528 return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, core_sched_before, 2529 NULL, 2530 (struct task_struct *)a, 2531 (struct task_struct *)b); 2532 else 2533 return time_after64(a->scx.core_sched_at, b->scx.core_sched_at); 2534 } 2535 #endif /* CONFIG_SCHED_CORE */ 2536 2537 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags) 2538 { 2539 struct scx_sched *sch = scx_root; 2540 bool rq_bypass; 2541 2542 /* 2543 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it 2544 * can be a good migration opportunity with low cache and memory 2545 * footprint. Returning a CPU different than @prev_cpu triggers 2546 * immediate rq migration. However, for SCX, as the current rq 2547 * association doesn't dictate where the task is going to run, this 2548 * doesn't fit well. If necessary, we can later add a dedicated method 2549 * which can decide to preempt self to force it through the regular 2550 * scheduling path. 
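	 *
	 * For reference, a minimal BPF-side ops.select_cpu() feeding this path
	 * could look like the following (illustrative sketch; the ops name is
	 * hypothetical, the kfuncs are part of the sched_ext BPF API):
	 *
	 *	s32 BPF_STRUCT_OPS(sched_select_cpu, struct task_struct *p,
	 *			   s32 prev_cpu, u64 wake_flags)
	 *	{
	 *		bool is_idle;
	 *		s32 cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags,
	 *						 &is_idle);
	 *
	 *		// Skip ops.enqueue() when an idle CPU was found.
	 *		if (is_idle)
	 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL,
	 *					   SCX_SLICE_DFL, 0);
	 *		return cpu;
	 *	}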
2551 */ 2552 if (unlikely(wake_flags & WF_EXEC)) 2553 return prev_cpu; 2554 2555 rq_bypass = scx_rq_bypassing(task_rq(p)); 2556 if (likely(SCX_HAS_OP(sch, select_cpu)) && !rq_bypass) { 2557 s32 cpu; 2558 struct task_struct **ddsp_taskp; 2559 2560 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task); 2561 WARN_ON_ONCE(*ddsp_taskp); 2562 *ddsp_taskp = p; 2563 2564 cpu = SCX_CALL_OP_TASK_RET(sch, 2565 SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU, 2566 select_cpu, NULL, p, prev_cpu, 2567 wake_flags); 2568 p->scx.selected_cpu = cpu; 2569 *ddsp_taskp = NULL; 2570 if (ops_cpu_valid(sch, cpu, "from ops.select_cpu()")) 2571 return cpu; 2572 else 2573 return prev_cpu; 2574 } else { 2575 s32 cpu; 2576 2577 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0); 2578 if (cpu >= 0) { 2579 refill_task_slice_dfl(sch, p); 2580 p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL; 2581 } else { 2582 cpu = prev_cpu; 2583 } 2584 p->scx.selected_cpu = cpu; 2585 2586 if (rq_bypass) 2587 __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1); 2588 return cpu; 2589 } 2590 } 2591 2592 static void task_woken_scx(struct rq *rq, struct task_struct *p) 2593 { 2594 run_deferred(rq); 2595 } 2596 2597 static void set_cpus_allowed_scx(struct task_struct *p, 2598 struct affinity_context *ac) 2599 { 2600 struct scx_sched *sch = scx_root; 2601 2602 set_cpus_allowed_common(p, ac); 2603 2604 /* 2605 * The effective cpumask is stored in @p->cpus_ptr which may temporarily 2606 * differ from the configured one in @p->cpus_mask. Always tell the bpf 2607 * scheduler the effective one. 2608 * 2609 * Fine-grained memory write control is enforced by BPF making the const 2610 * designation pointless. Cast it away when calling the operation. 2611 */ 2612 if (SCX_HAS_OP(sch, set_cpumask)) 2613 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, NULL, 2614 p, (struct cpumask *)p->cpus_ptr); 2615 } 2616 2617 static void handle_hotplug(struct rq *rq, bool online) 2618 { 2619 struct scx_sched *sch = scx_root; 2620 int cpu = cpu_of(rq); 2621 2622 atomic_long_inc(&scx_hotplug_seq); 2623 2624 /* 2625 * scx_root updates are protected by cpus_read_lock() and will stay 2626 * stable here. Note that we can't depend on scx_enabled() test as the 2627 * hotplug ops need to be enabled before __scx_enabled is set. 2628 */ 2629 if (unlikely(!sch)) 2630 return; 2631 2632 if (scx_enabled()) 2633 scx_idle_update_selcpu_topology(&sch->ops); 2634 2635 if (online && SCX_HAS_OP(sch, cpu_online)) 2636 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_online, NULL, cpu); 2637 else if (!online && SCX_HAS_OP(sch, cpu_offline)) 2638 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_offline, NULL, cpu); 2639 else 2640 scx_exit(sch, SCX_EXIT_UNREG_KERN, 2641 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG, 2642 "cpu %d going %s, exiting scheduler", cpu, 2643 online ? 
"online" : "offline"); 2644 } 2645 2646 void scx_rq_activate(struct rq *rq) 2647 { 2648 handle_hotplug(rq, true); 2649 } 2650 2651 void scx_rq_deactivate(struct rq *rq) 2652 { 2653 handle_hotplug(rq, false); 2654 } 2655 2656 static void rq_online_scx(struct rq *rq) 2657 { 2658 rq->scx.flags |= SCX_RQ_ONLINE; 2659 } 2660 2661 static void rq_offline_scx(struct rq *rq) 2662 { 2663 rq->scx.flags &= ~SCX_RQ_ONLINE; 2664 } 2665 2666 2667 static bool check_rq_for_timeouts(struct rq *rq) 2668 { 2669 struct scx_sched *sch; 2670 struct task_struct *p; 2671 struct rq_flags rf; 2672 bool timed_out = false; 2673 2674 rq_lock_irqsave(rq, &rf); 2675 sch = rcu_dereference_bh(scx_root); 2676 if (unlikely(!sch)) 2677 goto out_unlock; 2678 2679 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) { 2680 unsigned long last_runnable = p->scx.runnable_at; 2681 2682 if (unlikely(time_after(jiffies, 2683 last_runnable + scx_watchdog_timeout))) { 2684 u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable); 2685 2686 scx_exit(sch, SCX_EXIT_ERROR_STALL, 0, 2687 "%s[%d] failed to run for %u.%03us", 2688 p->comm, p->pid, dur_ms / 1000, dur_ms % 1000); 2689 timed_out = true; 2690 break; 2691 } 2692 } 2693 out_unlock: 2694 rq_unlock_irqrestore(rq, &rf); 2695 return timed_out; 2696 } 2697 2698 static void scx_watchdog_workfn(struct work_struct *work) 2699 { 2700 int cpu; 2701 2702 WRITE_ONCE(scx_watchdog_timestamp, jiffies); 2703 2704 for_each_online_cpu(cpu) { 2705 if (unlikely(check_rq_for_timeouts(cpu_rq(cpu)))) 2706 break; 2707 2708 cond_resched(); 2709 } 2710 queue_delayed_work(system_unbound_wq, to_delayed_work(work), 2711 scx_watchdog_timeout / 2); 2712 } 2713 2714 void scx_tick(struct rq *rq) 2715 { 2716 struct scx_sched *sch; 2717 unsigned long last_check; 2718 2719 if (!scx_enabled()) 2720 return; 2721 2722 sch = rcu_dereference_bh(scx_root); 2723 if (unlikely(!sch)) 2724 return; 2725 2726 last_check = READ_ONCE(scx_watchdog_timestamp); 2727 if (unlikely(time_after(jiffies, 2728 last_check + READ_ONCE(scx_watchdog_timeout)))) { 2729 u32 dur_ms = jiffies_to_msecs(jiffies - last_check); 2730 2731 scx_exit(sch, SCX_EXIT_ERROR_STALL, 0, 2732 "watchdog failed to check in for %u.%03us", 2733 dur_ms / 1000, dur_ms % 1000); 2734 } 2735 2736 update_other_load_avgs(rq); 2737 } 2738 2739 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued) 2740 { 2741 struct scx_sched *sch = scx_root; 2742 2743 update_curr_scx(rq); 2744 2745 /* 2746 * While disabling, always resched and refresh core-sched timestamp as 2747 * we can't trust the slice management or ops.core_sched_before(). 2748 */ 2749 if (scx_rq_bypassing(rq)) { 2750 curr->scx.slice = 0; 2751 touch_core_sched(rq, curr); 2752 } else if (SCX_HAS_OP(sch, tick)) { 2753 SCX_CALL_OP_TASK(sch, SCX_KF_REST, tick, rq, curr); 2754 } 2755 2756 if (!curr->scx.slice) 2757 resched_curr(rq); 2758 } 2759 2760 #ifdef CONFIG_EXT_GROUP_SCHED 2761 static struct cgroup *tg_cgrp(struct task_group *tg) 2762 { 2763 /* 2764 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup, 2765 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the 2766 * root cgroup. 
2767 	 */
2768 	if (tg && tg->css.cgroup)
2769 		return tg->css.cgroup;
2770 	else
2771 		return &cgrp_dfl_root.cgrp;
2772 }
2773 
2774 #define SCX_INIT_TASK_ARGS_CGROUP(tg)		.cgroup = tg_cgrp(tg),
2775 
2776 #else	/* CONFIG_EXT_GROUP_SCHED */
2777 
2778 #define SCX_INIT_TASK_ARGS_CGROUP(tg)
2779 
2780 #endif	/* CONFIG_EXT_GROUP_SCHED */
2781 
2782 static enum scx_task_state scx_get_task_state(const struct task_struct *p)
2783 {
2784 	return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
2785 }
2786 
2787 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
2788 {
2789 	enum scx_task_state prev_state = scx_get_task_state(p);
2790 	bool warn = false;
2791 
2792 	BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS));
2793 
2794 	switch (state) {
2795 	case SCX_TASK_NONE:
2796 		break;
2797 	case SCX_TASK_INIT:
2798 		warn = prev_state != SCX_TASK_NONE;
2799 		break;
2800 	case SCX_TASK_READY:
2801 		warn = prev_state == SCX_TASK_NONE;
2802 		break;
2803 	case SCX_TASK_ENABLED:
2804 		warn = prev_state != SCX_TASK_READY;
2805 		break;
2806 	default:
2807 		WARN_ONCE(true, "sched_ext: Invalid task state transition %d -> %d for %s[%d]", prev_state, state, p->comm, p->pid);
2808 		return;
2809 	}
2810 
2811 	WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
2812 		  prev_state, state, p->comm, p->pid);
2813 
2814 	p->scx.flags &= ~SCX_TASK_STATE_MASK;
2815 	p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
2816 }
2817 
2818 static int scx_init_task(struct task_struct *p, struct task_group *tg, bool fork)
2819 {
2820 	struct scx_sched *sch = scx_root;
2821 	int ret;
2822 
2823 	p->scx.disallow = false;
2824 
2825 	if (SCX_HAS_OP(sch, init_task)) {
2826 		struct scx_init_task_args args = {
2827 			SCX_INIT_TASK_ARGS_CGROUP(tg)
2828 			.fork = fork,
2829 		};
2830 
2831 		ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init_task, NULL,
2832 				      p, &args);
2833 		if (unlikely(ret)) {
2834 			ret = ops_sanitize_err(sch, "init_task", ret);
2835 			return ret;
2836 		}
2837 	}
2838 
2839 	scx_set_task_state(p, SCX_TASK_INIT);
2840 
2841 	if (p->scx.disallow) {
2842 		if (!fork) {
2843 			struct rq *rq;
2844 			struct rq_flags rf;
2845 
2846 			rq = task_rq_lock(p, &rf);
2847 
2848 			/*
2849 			 * We're in the load path and @p->policy will be applied
2850 			 * right after. Reverting @p->policy here and rejecting
2851 			 * %SCHED_EXT transitions from scx_check_setscheduler()
2852 			 * guarantees that if ops.init_task() sets @p->disallow,
2853 			 * @p can never be in SCX.
2854 			 */
2855 			if (p->policy == SCHED_EXT) {
2856 				p->policy = SCHED_NORMAL;
2857 				atomic_long_inc(&scx_nr_rejected);
2858 			}
2859 
2860 			task_rq_unlock(rq, p, &rf);
2861 		} else if (p->policy == SCHED_EXT) {
2862 			scx_error(sch, "ops.init_task() set task->scx.disallow for %s[%d] during fork",
2863 				  p->comm, p->pid);
2864 		}
2865 	}
2866 
2867 	p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
2868 	return 0;
2869 }
2870 
2871 static void scx_enable_task(struct task_struct *p)
2872 {
2873 	struct scx_sched *sch = scx_root;
2874 	struct rq *rq = task_rq(p);
2875 	u32 weight;
2876 
2877 	lockdep_assert_rq_held(rq);
2878 
2879 	/*
2880 	 * Set the weight before calling ops.enable() so that the scheduler
2881 	 * doesn't see a stale value if it inspects the task struct.
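	 *
	 * The BPF-side hook correspondingly has this shape (illustrative
	 * sketch; recording the value is typically all that's needed):
	 *
	 *	void BPF_STRUCT_OPS(sched_set_weight, struct task_struct *p,
	 *			    u32 weight)
	 *	{
	 *		// weight is on the cgroup scale: 100 corresponds to a
	 *		// nice-0 task, see sched_weight_to_cgroup().
	 *	}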
2882 */ 2883 if (task_has_idle_policy(p)) 2884 weight = WEIGHT_IDLEPRIO; 2885 else 2886 weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO]; 2887 2888 p->scx.weight = sched_weight_to_cgroup(weight); 2889 2890 if (SCX_HAS_OP(sch, enable)) 2891 SCX_CALL_OP_TASK(sch, SCX_KF_REST, enable, rq, p); 2892 scx_set_task_state(p, SCX_TASK_ENABLED); 2893 2894 if (SCX_HAS_OP(sch, set_weight)) 2895 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq, 2896 p, p->scx.weight); 2897 } 2898 2899 static void scx_disable_task(struct task_struct *p) 2900 { 2901 struct scx_sched *sch = scx_root; 2902 struct rq *rq = task_rq(p); 2903 2904 lockdep_assert_rq_held(rq); 2905 WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED); 2906 2907 if (SCX_HAS_OP(sch, disable)) 2908 SCX_CALL_OP_TASK(sch, SCX_KF_REST, disable, rq, p); 2909 scx_set_task_state(p, SCX_TASK_READY); 2910 } 2911 2912 static void scx_exit_task(struct task_struct *p) 2913 { 2914 struct scx_sched *sch = scx_root; 2915 struct scx_exit_task_args args = { 2916 .cancelled = false, 2917 }; 2918 2919 lockdep_assert_rq_held(task_rq(p)); 2920 2921 switch (scx_get_task_state(p)) { 2922 case SCX_TASK_NONE: 2923 return; 2924 case SCX_TASK_INIT: 2925 args.cancelled = true; 2926 break; 2927 case SCX_TASK_READY: 2928 break; 2929 case SCX_TASK_ENABLED: 2930 scx_disable_task(p); 2931 break; 2932 default: 2933 WARN_ON_ONCE(true); 2934 return; 2935 } 2936 2937 if (SCX_HAS_OP(sch, exit_task)) 2938 SCX_CALL_OP_TASK(sch, SCX_KF_REST, exit_task, task_rq(p), 2939 p, &args); 2940 scx_set_task_state(p, SCX_TASK_NONE); 2941 } 2942 2943 void init_scx_entity(struct sched_ext_entity *scx) 2944 { 2945 memset(scx, 0, sizeof(*scx)); 2946 INIT_LIST_HEAD(&scx->dsq_list.node); 2947 RB_CLEAR_NODE(&scx->dsq_priq); 2948 scx->sticky_cpu = -1; 2949 scx->holding_cpu = -1; 2950 INIT_LIST_HEAD(&scx->runnable_node); 2951 scx->runnable_at = jiffies; 2952 scx->ddsp_dsq_id = SCX_DSQ_INVALID; 2953 scx->slice = READ_ONCE(scx_slice_dfl); 2954 } 2955 2956 void scx_pre_fork(struct task_struct *p) 2957 { 2958 /* 2959 * BPF scheduler enable/disable paths want to be able to iterate and 2960 * update all tasks which can become complex when racing forks. As 2961 * enable/disable are very cold paths, let's use a percpu_rwsem to 2962 * exclude forks. 2963 */ 2964 percpu_down_read(&scx_fork_rwsem); 2965 } 2966 2967 int scx_fork(struct task_struct *p) 2968 { 2969 percpu_rwsem_assert_held(&scx_fork_rwsem); 2970 2971 if (scx_init_task_enabled) 2972 return scx_init_task(p, task_group(p), true); 2973 else 2974 return 0; 2975 } 2976 2977 void scx_post_fork(struct task_struct *p) 2978 { 2979 if (scx_init_task_enabled) { 2980 scx_set_task_state(p, SCX_TASK_READY); 2981 2982 /* 2983 * Enable the task immediately if it's running on sched_ext. 2984 * Otherwise, it'll be enabled in switching_to_scx() if and 2985 * when it's ever configured to run with a SCHED_EXT policy. 
2986 */ 2987 if (p->sched_class == &ext_sched_class) { 2988 struct rq_flags rf; 2989 struct rq *rq; 2990 2991 rq = task_rq_lock(p, &rf); 2992 scx_enable_task(p); 2993 task_rq_unlock(rq, p, &rf); 2994 } 2995 } 2996 2997 raw_spin_lock_irq(&scx_tasks_lock); 2998 list_add_tail(&p->scx.tasks_node, &scx_tasks); 2999 raw_spin_unlock_irq(&scx_tasks_lock); 3000 3001 percpu_up_read(&scx_fork_rwsem); 3002 } 3003 3004 void scx_cancel_fork(struct task_struct *p) 3005 { 3006 if (scx_enabled()) { 3007 struct rq *rq; 3008 struct rq_flags rf; 3009 3010 rq = task_rq_lock(p, &rf); 3011 WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY); 3012 scx_exit_task(p); 3013 task_rq_unlock(rq, p, &rf); 3014 } 3015 3016 percpu_up_read(&scx_fork_rwsem); 3017 } 3018 3019 void sched_ext_dead(struct task_struct *p) 3020 { 3021 unsigned long flags; 3022 3023 raw_spin_lock_irqsave(&scx_tasks_lock, flags); 3024 list_del_init(&p->scx.tasks_node); 3025 raw_spin_unlock_irqrestore(&scx_tasks_lock, flags); 3026 3027 /* 3028 * @p is off scx_tasks and wholly ours. scx_enable()'s READY -> ENABLED 3029 * transitions can't race us. Disable ops for @p. 3030 */ 3031 if (scx_get_task_state(p) != SCX_TASK_NONE) { 3032 struct rq_flags rf; 3033 struct rq *rq; 3034 3035 rq = task_rq_lock(p, &rf); 3036 scx_exit_task(p); 3037 task_rq_unlock(rq, p, &rf); 3038 } 3039 } 3040 3041 static void reweight_task_scx(struct rq *rq, struct task_struct *p, 3042 const struct load_weight *lw) 3043 { 3044 struct scx_sched *sch = scx_root; 3045 3046 lockdep_assert_rq_held(task_rq(p)); 3047 3048 p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight)); 3049 if (SCX_HAS_OP(sch, set_weight)) 3050 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq, 3051 p, p->scx.weight); 3052 } 3053 3054 static void prio_changed_scx(struct rq *rq, struct task_struct *p, u64 oldprio) 3055 { 3056 } 3057 3058 static void switching_to_scx(struct rq *rq, struct task_struct *p) 3059 { 3060 struct scx_sched *sch = scx_root; 3061 3062 scx_enable_task(p); 3063 3064 /* 3065 * set_cpus_allowed_scx() is not called while @p is associated with a 3066 * different scheduler class. Keep the BPF scheduler up-to-date. 3067 */ 3068 if (SCX_HAS_OP(sch, set_cpumask)) 3069 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, rq, 3070 p, (struct cpumask *)p->cpus_ptr); 3071 } 3072 3073 static void switched_from_scx(struct rq *rq, struct task_struct *p) 3074 { 3075 scx_disable_task(p); 3076 } 3077 3078 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags) {} 3079 3080 static void switched_to_scx(struct rq *rq, struct task_struct *p) {} 3081 3082 int scx_check_setscheduler(struct task_struct *p, int policy) 3083 { 3084 lockdep_assert_rq_held(task_rq(p)); 3085 3086 /* if disallow, reject transitioning into SCX */ 3087 if (scx_enabled() && READ_ONCE(p->scx.disallow) && 3088 p->policy != policy && policy == SCHED_EXT) 3089 return -EACCES; 3090 3091 return 0; 3092 } 3093 3094 #ifdef CONFIG_NO_HZ_FULL 3095 bool scx_can_stop_tick(struct rq *rq) 3096 { 3097 struct task_struct *p = rq->curr; 3098 3099 if (scx_rq_bypassing(rq)) 3100 return false; 3101 3102 if (p->sched_class != &ext_sched_class) 3103 return true; 3104 3105 /* 3106 * @rq can dispatch from different DSQs, so we can't tell whether it 3107 * needs the tick or not by looking at nr_running. Allow stopping ticks 3108 * iff the BPF scheduler indicated so. See set_next_task_scx(). 
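	 *
	 * A BPF scheduler opts a task into tickless operation by dispatching
	 * it with an infinite slice, e.g. (illustrative):
	 *
	 *	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_INF, 0);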
3109 */ 3110 return rq->scx.flags & SCX_RQ_CAN_STOP_TICK; 3111 } 3112 #endif 3113 3114 #ifdef CONFIG_EXT_GROUP_SCHED 3115 3116 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_ops_rwsem); 3117 static bool scx_cgroup_enabled; 3118 3119 void scx_tg_init(struct task_group *tg) 3120 { 3121 tg->scx.weight = CGROUP_WEIGHT_DFL; 3122 tg->scx.bw_period_us = default_bw_period_us(); 3123 tg->scx.bw_quota_us = RUNTIME_INF; 3124 tg->scx.idle = false; 3125 } 3126 3127 int scx_tg_online(struct task_group *tg) 3128 { 3129 struct scx_sched *sch = scx_root; 3130 int ret = 0; 3131 3132 WARN_ON_ONCE(tg->scx.flags & (SCX_TG_ONLINE | SCX_TG_INITED)); 3133 3134 if (scx_cgroup_enabled) { 3135 if (SCX_HAS_OP(sch, cgroup_init)) { 3136 struct scx_cgroup_init_args args = 3137 { .weight = tg->scx.weight, 3138 .bw_period_us = tg->scx.bw_period_us, 3139 .bw_quota_us = tg->scx.bw_quota_us, 3140 .bw_burst_us = tg->scx.bw_burst_us }; 3141 3142 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init, 3143 NULL, tg->css.cgroup, &args); 3144 if (ret) 3145 ret = ops_sanitize_err(sch, "cgroup_init", ret); 3146 } 3147 if (ret == 0) 3148 tg->scx.flags |= SCX_TG_ONLINE | SCX_TG_INITED; 3149 } else { 3150 tg->scx.flags |= SCX_TG_ONLINE; 3151 } 3152 3153 return ret; 3154 } 3155 3156 void scx_tg_offline(struct task_group *tg) 3157 { 3158 struct scx_sched *sch = scx_root; 3159 3160 WARN_ON_ONCE(!(tg->scx.flags & SCX_TG_ONLINE)); 3161 3162 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_exit) && 3163 (tg->scx.flags & SCX_TG_INITED)) 3164 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL, 3165 tg->css.cgroup); 3166 tg->scx.flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED); 3167 } 3168 3169 int scx_cgroup_can_attach(struct cgroup_taskset *tset) 3170 { 3171 struct scx_sched *sch = scx_root; 3172 struct cgroup_subsys_state *css; 3173 struct task_struct *p; 3174 int ret; 3175 3176 if (!scx_cgroup_enabled) 3177 return 0; 3178 3179 cgroup_taskset_for_each(p, css, tset) { 3180 struct cgroup *from = tg_cgrp(task_group(p)); 3181 struct cgroup *to = tg_cgrp(css_tg(css)); 3182 3183 WARN_ON_ONCE(p->scx.cgrp_moving_from); 3184 3185 /* 3186 * sched_move_task() omits identity migrations. Let's match the 3187 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move() 3188 * always match one-to-one. 3189 */ 3190 if (from == to) 3191 continue; 3192 3193 if (SCX_HAS_OP(sch, cgroup_prep_move)) { 3194 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, 3195 cgroup_prep_move, NULL, 3196 p, from, css->cgroup); 3197 if (ret) 3198 goto err; 3199 } 3200 3201 p->scx.cgrp_moving_from = from; 3202 } 3203 3204 return 0; 3205 3206 err: 3207 cgroup_taskset_for_each(p, css, tset) { 3208 if (SCX_HAS_OP(sch, cgroup_cancel_move) && 3209 p->scx.cgrp_moving_from) 3210 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL, 3211 p, p->scx.cgrp_moving_from, css->cgroup); 3212 p->scx.cgrp_moving_from = NULL; 3213 } 3214 3215 return ops_sanitize_err(sch, "cgroup_prep_move", ret); 3216 } 3217 3218 void scx_cgroup_move_task(struct task_struct *p) 3219 { 3220 struct scx_sched *sch = scx_root; 3221 3222 if (!scx_cgroup_enabled) 3223 return; 3224 3225 /* 3226 * @p must have ops.cgroup_prep_move() called on it and thus 3227 * cgrp_moving_from set. 
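	 *
	 * Per migration attempt, the BPF scheduler therefore observes exactly
	 * one of the following callback sequences (sketch):
	 *
	 *	ops.cgroup_prep_move() -> ops.cgroup_move()
	 *	ops.cgroup_prep_move() -> ops.cgroup_cancel_move()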
3228 */ 3229 if (SCX_HAS_OP(sch, cgroup_move) && 3230 !WARN_ON_ONCE(!p->scx.cgrp_moving_from)) 3231 SCX_CALL_OP_TASK(sch, SCX_KF_UNLOCKED, cgroup_move, NULL, 3232 p, p->scx.cgrp_moving_from, 3233 tg_cgrp(task_group(p))); 3234 p->scx.cgrp_moving_from = NULL; 3235 } 3236 3237 void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) 3238 { 3239 struct scx_sched *sch = scx_root; 3240 struct cgroup_subsys_state *css; 3241 struct task_struct *p; 3242 3243 if (!scx_cgroup_enabled) 3244 return; 3245 3246 cgroup_taskset_for_each(p, css, tset) { 3247 if (SCX_HAS_OP(sch, cgroup_cancel_move) && 3248 p->scx.cgrp_moving_from) 3249 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL, 3250 p, p->scx.cgrp_moving_from, css->cgroup); 3251 p->scx.cgrp_moving_from = NULL; 3252 } 3253 } 3254 3255 void scx_group_set_weight(struct task_group *tg, unsigned long weight) 3256 { 3257 struct scx_sched *sch = scx_root; 3258 3259 percpu_down_read(&scx_cgroup_ops_rwsem); 3260 3261 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_weight) && 3262 tg->scx.weight != weight) 3263 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_weight, NULL, 3264 tg_cgrp(tg), weight); 3265 3266 tg->scx.weight = weight; 3267 3268 percpu_up_read(&scx_cgroup_ops_rwsem); 3269 } 3270 3271 void scx_group_set_idle(struct task_group *tg, bool idle) 3272 { 3273 struct scx_sched *sch = scx_root; 3274 3275 percpu_down_read(&scx_cgroup_ops_rwsem); 3276 3277 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_idle)) 3278 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_idle, NULL, 3279 tg_cgrp(tg), idle); 3280 3281 /* Update the task group's idle state */ 3282 tg->scx.idle = idle; 3283 3284 percpu_up_read(&scx_cgroup_ops_rwsem); 3285 } 3286 3287 void scx_group_set_bandwidth(struct task_group *tg, 3288 u64 period_us, u64 quota_us, u64 burst_us) 3289 { 3290 struct scx_sched *sch = scx_root; 3291 3292 percpu_down_read(&scx_cgroup_ops_rwsem); 3293 3294 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_bandwidth) && 3295 (tg->scx.bw_period_us != period_us || 3296 tg->scx.bw_quota_us != quota_us || 3297 tg->scx.bw_burst_us != burst_us)) 3298 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_bandwidth, NULL, 3299 tg_cgrp(tg), period_us, quota_us, burst_us); 3300 3301 tg->scx.bw_period_us = period_us; 3302 tg->scx.bw_quota_us = quota_us; 3303 tg->scx.bw_burst_us = burst_us; 3304 3305 percpu_up_read(&scx_cgroup_ops_rwsem); 3306 } 3307 3308 static void scx_cgroup_lock(void) 3309 { 3310 percpu_down_write(&scx_cgroup_ops_rwsem); 3311 cgroup_lock(); 3312 } 3313 3314 static void scx_cgroup_unlock(void) 3315 { 3316 cgroup_unlock(); 3317 percpu_up_write(&scx_cgroup_ops_rwsem); 3318 } 3319 3320 #else /* CONFIG_EXT_GROUP_SCHED */ 3321 3322 static void scx_cgroup_lock(void) {} 3323 static void scx_cgroup_unlock(void) {} 3324 3325 #endif /* CONFIG_EXT_GROUP_SCHED */ 3326 3327 /* 3328 * Omitted operations: 3329 * 3330 * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task 3331 * isn't tied to the CPU at that point. Preemption is implemented by resetting 3332 * the victim task's slice to 0 and triggering reschedule on the target CPU. 3333 * 3334 * - migrate_task_rq: Unnecessary as task to cpu mapping is transient. 3335 * 3336 * - task_fork/dead: We need fork/dead notifications for all tasks regardless of 3337 * their current sched_class. Call them directly from sched core instead. 
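 *
 * For the wakeup_preempt point above, a BPF scheduler expresses such a
 * preemption at dispatch time, e.g. (illustrative):
 *
 *	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL,
 *			   SCX_ENQ_PREEMPT);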
3338 */ 3339 DEFINE_SCHED_CLASS(ext) = { 3340 .enqueue_task = enqueue_task_scx, 3341 .dequeue_task = dequeue_task_scx, 3342 .yield_task = yield_task_scx, 3343 .yield_to_task = yield_to_task_scx, 3344 3345 .wakeup_preempt = wakeup_preempt_scx, 3346 3347 .pick_task = pick_task_scx, 3348 3349 .put_prev_task = put_prev_task_scx, 3350 .set_next_task = set_next_task_scx, 3351 3352 .select_task_rq = select_task_rq_scx, 3353 .task_woken = task_woken_scx, 3354 .set_cpus_allowed = set_cpus_allowed_scx, 3355 3356 .rq_online = rq_online_scx, 3357 .rq_offline = rq_offline_scx, 3358 3359 .task_tick = task_tick_scx, 3360 3361 .switching_to = switching_to_scx, 3362 .switched_from = switched_from_scx, 3363 .switched_to = switched_to_scx, 3364 .reweight_task = reweight_task_scx, 3365 .prio_changed = prio_changed_scx, 3366 3367 .update_curr = update_curr_scx, 3368 3369 #ifdef CONFIG_UCLAMP_TASK 3370 .uclamp_enabled = 1, 3371 #endif 3372 }; 3373 3374 static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id) 3375 { 3376 memset(dsq, 0, sizeof(*dsq)); 3377 3378 raw_spin_lock_init(&dsq->lock); 3379 INIT_LIST_HEAD(&dsq->list); 3380 dsq->id = dsq_id; 3381 } 3382 3383 static void free_dsq_irq_workfn(struct irq_work *irq_work) 3384 { 3385 struct llist_node *to_free = llist_del_all(&dsqs_to_free); 3386 struct scx_dispatch_q *dsq, *tmp_dsq; 3387 3388 llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node) 3389 kfree_rcu(dsq, rcu); 3390 } 3391 3392 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn); 3393 3394 static void destroy_dsq(struct scx_sched *sch, u64 dsq_id) 3395 { 3396 struct scx_dispatch_q *dsq; 3397 unsigned long flags; 3398 3399 rcu_read_lock(); 3400 3401 dsq = find_user_dsq(sch, dsq_id); 3402 if (!dsq) 3403 goto out_unlock_rcu; 3404 3405 raw_spin_lock_irqsave(&dsq->lock, flags); 3406 3407 if (dsq->nr) { 3408 scx_error(sch, "attempting to destroy in-use dsq 0x%016llx (nr=%u)", 3409 dsq->id, dsq->nr); 3410 goto out_unlock_dsq; 3411 } 3412 3413 if (rhashtable_remove_fast(&sch->dsq_hash, &dsq->hash_node, 3414 dsq_hash_params)) 3415 goto out_unlock_dsq; 3416 3417 /* 3418 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from 3419 * queueing more tasks. As this function can be called from anywhere, 3420 * freeing is bounced through an irq work to avoid nesting RCU 3421 * operations inside scheduler locks. 3422 */ 3423 dsq->id = SCX_DSQ_INVALID; 3424 llist_add(&dsq->free_node, &dsqs_to_free); 3425 irq_work_queue(&free_dsq_irq_work); 3426 3427 out_unlock_dsq: 3428 raw_spin_unlock_irqrestore(&dsq->lock, flags); 3429 out_unlock_rcu: 3430 rcu_read_unlock(); 3431 } 3432 3433 #ifdef CONFIG_EXT_GROUP_SCHED 3434 static void scx_cgroup_exit(struct scx_sched *sch) 3435 { 3436 struct cgroup_subsys_state *css; 3437 3438 scx_cgroup_enabled = false; 3439 3440 /* 3441 * scx_tg_on/offline() are excluded through cgroup_lock(). If we walk 3442 * cgroups and exit all the inited ones, all online cgroups are exited. 3443 */ 3444 css_for_each_descendant_post(css, &root_task_group.css) { 3445 struct task_group *tg = css_tg(css); 3446 3447 if (!(tg->scx.flags & SCX_TG_INITED)) 3448 continue; 3449 tg->scx.flags &= ~SCX_TG_INITED; 3450 3451 if (!sch->ops.cgroup_exit) 3452 continue; 3453 3454 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL, 3455 css->cgroup); 3456 } 3457 } 3458 3459 static int scx_cgroup_init(struct scx_sched *sch) 3460 { 3461 struct cgroup_subsys_state *css; 3462 int ret; 3463 3464 /* 3465 * scx_tg_on/offline() are excluded through cgroup_lock(). 
If we walk 3466 * cgroups and init, all online cgroups are initialized. 3467 */ 3468 css_for_each_descendant_pre(css, &root_task_group.css) { 3469 struct task_group *tg = css_tg(css); 3470 struct scx_cgroup_init_args args = { 3471 .weight = tg->scx.weight, 3472 .bw_period_us = tg->scx.bw_period_us, 3473 .bw_quota_us = tg->scx.bw_quota_us, 3474 .bw_burst_us = tg->scx.bw_burst_us, 3475 }; 3476 3477 if ((tg->scx.flags & 3478 (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE) 3479 continue; 3480 3481 if (!sch->ops.cgroup_init) { 3482 tg->scx.flags |= SCX_TG_INITED; 3483 continue; 3484 } 3485 3486 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init, NULL, 3487 css->cgroup, &args); 3488 if (ret) { 3489 css_put(css); 3490 scx_error(sch, "ops.cgroup_init() failed (%d)", ret); 3491 return ret; 3492 } 3493 tg->scx.flags |= SCX_TG_INITED; 3494 } 3495 3496 WARN_ON_ONCE(scx_cgroup_enabled); 3497 scx_cgroup_enabled = true; 3498 3499 return 0; 3500 } 3501 3502 #else 3503 static void scx_cgroup_exit(struct scx_sched *sch) {} 3504 static int scx_cgroup_init(struct scx_sched *sch) { return 0; } 3505 #endif 3506 3507 3508 /******************************************************************************** 3509 * Sysfs interface and ops enable/disable. 3510 */ 3511 3512 #define SCX_ATTR(_name) \ 3513 static struct kobj_attribute scx_attr_##_name = { \ 3514 .attr = { .name = __stringify(_name), .mode = 0444 }, \ 3515 .show = scx_attr_##_name##_show, \ 3516 } 3517 3518 static ssize_t scx_attr_state_show(struct kobject *kobj, 3519 struct kobj_attribute *ka, char *buf) 3520 { 3521 return sysfs_emit(buf, "%s\n", scx_enable_state_str[scx_enable_state()]); 3522 } 3523 SCX_ATTR(state); 3524 3525 static ssize_t scx_attr_switch_all_show(struct kobject *kobj, 3526 struct kobj_attribute *ka, char *buf) 3527 { 3528 return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all)); 3529 } 3530 SCX_ATTR(switch_all); 3531 3532 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj, 3533 struct kobj_attribute *ka, char *buf) 3534 { 3535 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected)); 3536 } 3537 SCX_ATTR(nr_rejected); 3538 3539 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj, 3540 struct kobj_attribute *ka, char *buf) 3541 { 3542 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq)); 3543 } 3544 SCX_ATTR(hotplug_seq); 3545 3546 static ssize_t scx_attr_enable_seq_show(struct kobject *kobj, 3547 struct kobj_attribute *ka, char *buf) 3548 { 3549 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq)); 3550 } 3551 SCX_ATTR(enable_seq); 3552 3553 static struct attribute *scx_global_attrs[] = { 3554 &scx_attr_state.attr, 3555 &scx_attr_switch_all.attr, 3556 &scx_attr_nr_rejected.attr, 3557 &scx_attr_hotplug_seq.attr, 3558 &scx_attr_enable_seq.attr, 3559 NULL, 3560 }; 3561 3562 static const struct attribute_group scx_global_attr_group = { 3563 .attrs = scx_global_attrs, 3564 }; 3565 3566 static void free_exit_info(struct scx_exit_info *ei); 3567 3568 static void scx_sched_free_rcu_work(struct work_struct *work) 3569 { 3570 struct rcu_work *rcu_work = to_rcu_work(work); 3571 struct scx_sched *sch = container_of(rcu_work, struct scx_sched, rcu_work); 3572 struct rhashtable_iter rht_iter; 3573 struct scx_dispatch_q *dsq; 3574 int node; 3575 3576 irq_work_sync(&sch->error_irq_work); 3577 kthread_stop(sch->helper->task); 3578 3579 free_percpu(sch->pcpu); 3580 3581 for_each_node_state(node, N_POSSIBLE) 3582 kfree(sch->global_dsqs[node]); 3583 kfree(sch->global_dsqs); 3584 3585 
rhashtable_walk_enter(&sch->dsq_hash, &rht_iter); 3586 do { 3587 rhashtable_walk_start(&rht_iter); 3588 3589 while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq)) 3590 destroy_dsq(sch, dsq->id); 3591 3592 rhashtable_walk_stop(&rht_iter); 3593 } while (dsq == ERR_PTR(-EAGAIN)); 3594 rhashtable_walk_exit(&rht_iter); 3595 3596 rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL); 3597 free_exit_info(sch->exit_info); 3598 kfree(sch); 3599 } 3600 3601 static void scx_kobj_release(struct kobject *kobj) 3602 { 3603 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj); 3604 3605 INIT_RCU_WORK(&sch->rcu_work, scx_sched_free_rcu_work); 3606 queue_rcu_work(system_unbound_wq, &sch->rcu_work); 3607 } 3608 3609 static ssize_t scx_attr_ops_show(struct kobject *kobj, 3610 struct kobj_attribute *ka, char *buf) 3611 { 3612 return sysfs_emit(buf, "%s\n", scx_root->ops.name); 3613 } 3614 SCX_ATTR(ops); 3615 3616 #define scx_attr_event_show(buf, at, events, kind) ({ \ 3617 sysfs_emit_at(buf, at, "%s %llu\n", #kind, (events)->kind); \ 3618 }) 3619 3620 static ssize_t scx_attr_events_show(struct kobject *kobj, 3621 struct kobj_attribute *ka, char *buf) 3622 { 3623 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj); 3624 struct scx_event_stats events; 3625 int at = 0; 3626 3627 scx_read_events(sch, &events); 3628 at += scx_attr_event_show(buf, at, &events, SCX_EV_SELECT_CPU_FALLBACK); 3629 at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE); 3630 at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_KEEP_LAST); 3631 at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_EXITING); 3632 at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED); 3633 at += scx_attr_event_show(buf, at, &events, SCX_EV_REFILL_SLICE_DFL); 3634 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DURATION); 3635 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DISPATCH); 3636 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_ACTIVATE); 3637 return at; 3638 } 3639 SCX_ATTR(events); 3640 3641 static struct attribute *scx_sched_attrs[] = { 3642 &scx_attr_ops.attr, 3643 &scx_attr_events.attr, 3644 NULL, 3645 }; 3646 ATTRIBUTE_GROUPS(scx_sched); 3647 3648 static const struct kobj_type scx_ktype = { 3649 .release = scx_kobj_release, 3650 .sysfs_ops = &kobj_sysfs_ops, 3651 .default_groups = scx_sched_groups, 3652 }; 3653 3654 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) 3655 { 3656 return add_uevent_var(env, "SCXOPS=%s", scx_root->ops.name); 3657 } 3658 3659 static const struct kset_uevent_ops scx_uevent_ops = { 3660 .uevent = scx_uevent, 3661 }; 3662 3663 /* 3664 * Used by sched_fork() and __setscheduler_prio() to pick the matching 3665 * sched_class. dl/rt are already handled. 
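 *
 * In effect (sketch):
 *
 *	disabled or disabling	-> never pick ext
 *	scx_switching_all set	-> pick ext for everything not dl/rt
 *	otherwise		-> pick ext iff policy == SCHED_EXT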
 */
bool task_should_scx(int policy)
{
	if (!scx_enabled() || unlikely(scx_enable_state() == SCX_DISABLING))
		return false;
	if (READ_ONCE(scx_switching_all))
		return true;
	return policy == SCHED_EXT;
}

bool scx_allow_ttwu_queue(const struct task_struct *p)
{
	struct scx_sched *sch;

	if (!scx_enabled())
		return true;

	sch = rcu_dereference_sched(scx_root);
	if (unlikely(!sch))
		return true;

	if (sch->ops.flags & SCX_OPS_ALLOW_QUEUED_WAKEUP)
		return true;

	if (unlikely(p->sched_class != &ext_sched_class))
		return true;

	return false;
}

/**
 * handle_lockup - sched_ext common lockup handler
 * @fmt: format string
 *
 * Called on a system stall or lockup condition. Initiates an abort of
 * sched_ext if enabled, which may resolve the reported lockup.
 *
 * Returns %true if sched_ext is enabled and abort was initiated, which may
 * resolve the lockup. %false if sched_ext is not enabled or abort was already
 * initiated by someone else.
 */
static __printf(1, 2) bool handle_lockup(const char *fmt, ...)
{
	struct scx_sched *sch;
	va_list args;
	bool ret;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return false;

	switch (scx_enable_state()) {
	case SCX_ENABLING:
	case SCX_ENABLED:
		va_start(args, fmt);
		ret = scx_verror(sch, fmt, args);
		va_end(args);
		return ret;
	default:
		return false;
	}
}

/**
 * scx_rcu_cpu_stall - sched_ext RCU CPU stall handler
 *
 * While there are various reasons why RCU CPU stalls can occur on a system
 * that may not be caused by the current BPF scheduler, try kicking out the
 * current scheduler in an attempt to recover the system to a good state before
 * issuing panics.
 *
 * Returns %true if sched_ext is enabled and abort was initiated, which may
 * resolve the reported RCU stall. %false if sched_ext is not enabled or someone
 * else already initiated abort.
 */
bool scx_rcu_cpu_stall(void)
{
	return handle_lockup("RCU CPU stall detected!");
}

/**
 * scx_softlockup - sched_ext softlockup handler
 * @dur_s: number of seconds the CPU has been stuck due to the soft lockup
 *
 * On some multi-socket setups (e.g. 2x Intel 8480c), the BPF scheduler can
 * live-lock the system by making many CPUs target the same DSQ to the point
 * where soft-lockup detection triggers. This function is called from the
 * soft-lockup watchdog when the triggering point is close and tries to unjam
 * the system by aborting the BPF scheduler.
 */
void scx_softlockup(u32 dur_s)
{
	if (!handle_lockup("soft lockup - CPU %d stuck for %us", smp_processor_id(), dur_s))
		return;

	printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU %d stuck for %us, disabling BPF scheduler\n",
			smp_processor_id(), dur_s);
}

/**
 * scx_hardlockup - sched_ext hardlockup handler
 * @cpu: CPU on which the hard lockup was detected
 *
 * A poorly behaving BPF scheduler can trigger hard lockup by e.g. putting
 * numerous affinitized tasks in a single queue and directing all CPUs at it.
 * Try kicking out the current scheduler in an attempt to recover the system to
 * a good state before taking more drastic actions.
 *
 * Returns %true if sched_ext is enabled and abort was initiated, which may
 * resolve the reported hard lockup. %false if sched_ext is not enabled or
 * someone else already initiated abort.
 */
bool scx_hardlockup(int cpu)
{
	if (!handle_lockup("hard lockup - CPU %d", cpu))
		return false;

	printk_deferred(KERN_ERR "sched_ext: Hard lockup - CPU %d, disabling BPF scheduler\n",
			cpu);
	return true;
}

static u32 bypass_lb_cpu(struct scx_sched *sch, struct rq *rq,
			 struct cpumask *donee_mask, struct cpumask *resched_mask,
			 u32 nr_donor_target, u32 nr_donee_target)
{
	struct scx_dispatch_q *donor_dsq = &rq->scx.bypass_dsq;
	struct task_struct *p, *n;
	struct scx_dsq_list_node cursor = INIT_DSQ_LIST_CURSOR(cursor, 0, 0);
	s32 delta = READ_ONCE(donor_dsq->nr) - nr_donor_target;
	u32 nr_balanced = 0, min_delta_us;

	/*
	 * All we want to guarantee is reasonable forward progress. No reason to
	 * fine-tune. Assuming every task on @donor_dsq runs its full slice,
	 * consider offloading iff the total queued duration is over the
	 * threshold.
	 */
	min_delta_us = scx_bypass_lb_intv_us / SCX_BYPASS_LB_MIN_DELTA_DIV;
	if (delta < DIV_ROUND_UP(min_delta_us, scx_slice_bypass_us))
		return 0;

	raw_spin_rq_lock_irq(rq);
	raw_spin_lock(&donor_dsq->lock);
	list_add(&cursor.node, &donor_dsq->list);
resume:
	n = container_of(&cursor, struct task_struct, scx.dsq_list);
	n = nldsq_next_task(donor_dsq, n, false);

	while ((p = n)) {
		struct rq *donee_rq;
		struct scx_dispatch_q *donee_dsq;
		int donee;

		n = nldsq_next_task(donor_dsq, n, false);

		if (donor_dsq->nr <= nr_donor_target)
			break;

		if (cpumask_empty(donee_mask))
			break;

		donee = cpumask_any_and_distribute(donee_mask, p->cpus_ptr);
		if (donee >= nr_cpu_ids)
			continue;

		donee_rq = cpu_rq(donee);
		donee_dsq = &donee_rq->scx.bypass_dsq;

		/*
		 * $p's rq is not locked but $p's DSQ lock protects its
		 * scheduling properties, making this test safe.
		 */
		if (!task_can_run_on_remote_rq(sch, p, donee_rq, false))
			continue;

		/*
		 * Moving $p from one non-local DSQ to another. The source rq
		 * and DSQ are already locked. Do an abbreviated dequeue and
		 * then perform enqueue without unlocking $donor_dsq.
		 *
		 * We don't want to drop and reacquire the lock on each
		 * iteration as @donor_dsq can be very long and potentially
		 * highly contended. Donee DSQs are less likely to be contended.
		 * The nested locking is safe as only this LB moves tasks
		 * between bypass DSQs.
		 */
		dispatch_dequeue_locked(p, donor_dsq);
		dispatch_enqueue(sch, donee_dsq, p, SCX_ENQ_NESTED);

		/*
		 * $donee might have been idle and need to be woken up. No need
		 * to be clever. Kick every CPU that receives tasks.
		 */
		cpumask_set_cpu(donee, resched_mask);

		if (READ_ONCE(donee_dsq->nr) >= nr_donee_target)
			cpumask_clear_cpu(donee, donee_mask);

		nr_balanced++;
		if (!(nr_balanced % SCX_BYPASS_LB_BATCH) && n) {
			list_move_tail(&cursor.node, &n->scx.dsq_list.node);
			raw_spin_unlock(&donor_dsq->lock);
			raw_spin_rq_unlock_irq(rq);
			cpu_relax();
			raw_spin_rq_lock_irq(rq);
			raw_spin_lock(&donor_dsq->lock);
			goto resume;
		}
	}

	list_del_init(&cursor.node);
	raw_spin_unlock(&donor_dsq->lock);
	raw_spin_rq_unlock_irq(rq);

	return nr_balanced;
}

static void bypass_lb_node(struct scx_sched *sch, int node)
{
	const struct cpumask *node_mask = cpumask_of_node(node);
	struct cpumask *donee_mask = scx_bypass_lb_donee_cpumask;
	struct cpumask *resched_mask = scx_bypass_lb_resched_cpumask;
	u32 nr_tasks = 0, nr_cpus = 0, nr_balanced = 0;
	u32 nr_target, nr_donor_target;
	u32 before_min = U32_MAX, before_max = 0;
	u32 after_min = U32_MAX, after_max = 0;
	int cpu;

	/* count the target tasks and CPUs */
	for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
		u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr);

		nr_tasks += nr;
		nr_cpus++;

		before_min = min(nr, before_min);
		before_max = max(nr, before_max);
	}

	if (!nr_cpus)
		return;

	/*
	 * We don't want CPUs to have more than $nr_donor_target tasks, and
	 * balancing should fill donee CPUs up to $nr_target. Once the targets
	 * are calculated, find the donee CPUs.
	 */
	nr_target = DIV_ROUND_UP(nr_tasks, nr_cpus);
	nr_donor_target = DIV_ROUND_UP(nr_target * SCX_BYPASS_LB_DONOR_PCT, 100);

	cpumask_clear(donee_mask);
	for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
		if (READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr) < nr_target)
			cpumask_set_cpu(cpu, donee_mask);
	}

	/* iterate !donee CPUs and see if they should be offloaded */
	cpumask_clear(resched_mask);
	for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
		struct rq *rq = cpu_rq(cpu);
		struct scx_dispatch_q *donor_dsq = &rq->scx.bypass_dsq;

		if (cpumask_empty(donee_mask))
			break;
		if (cpumask_test_cpu(cpu, donee_mask))
			continue;
		if (READ_ONCE(donor_dsq->nr) <= nr_donor_target)
			continue;

		nr_balanced += bypass_lb_cpu(sch, rq, donee_mask, resched_mask,
					     nr_donor_target, nr_target);
	}

	for_each_cpu(cpu, resched_mask) {
		struct rq *rq = cpu_rq(cpu);

		raw_spin_rq_lock_irq(rq);
		resched_curr(rq);
		raw_spin_rq_unlock_irq(rq);
	}

	for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
		u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr);

		after_min = min(nr, after_min);
		after_max = max(nr, after_max);
	}

	trace_sched_ext_bypass_lb(node, nr_cpus, nr_tasks, nr_balanced,
				  before_min, before_max, after_min, after_max);
}

/*
 * In bypass mode, all tasks are put on the per-CPU bypass DSQs. If the machine
 * is over-saturated and the BPF scheduler skewed tasks onto a few CPUs, some
 * bypass DSQs can be overloaded. If there are enough tasks to saturate other
 * lightly loaded CPUs, such imbalance can lead to very high execution latency
 * on the overloaded CPUs and thus to hung tasks and RCU stalls.
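 * For example, if the now-bypassed scheduler had funneled most runnable tasks
 * into a handful of per-CPU bypass DSQs, those CPUs can be left with queues
 * that take many seconds to drain while the rest of the node sits idle.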
 * To avoid such outcomes, a simple load balancing mechanism is implemented by
 * the following timer which runs periodically while bypass mode is in effect.
 */
static void scx_bypass_lb_timerfn(struct timer_list *timer)
{
	struct scx_sched *sch;
	int node;
	u32 intv_us;

	sch = rcu_dereference_all(scx_root);
	if (unlikely(!sch) || !READ_ONCE(scx_bypass_depth))
		return;

	for_each_node_with_cpus(node)
		bypass_lb_node(sch, node);

	intv_us = READ_ONCE(scx_bypass_lb_intv_us);
	if (intv_us)
		mod_timer(timer, jiffies + usecs_to_jiffies(intv_us));
}

static DEFINE_TIMER(scx_bypass_lb_timer, scx_bypass_lb_timerfn);

/**
 * scx_bypass - [Un]bypass scx_ops and guarantee forward progress
 * @bypass: true for bypass, false for unbypass
 *
 * Bypassing guarantees that all runnable tasks make forward progress without
 * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
 * be held by tasks that the BPF scheduler is forgetting to run, which
 * unfortunately also excludes toggling the static branches.
 *
 * Let's work around it by overriding a couple of ops and modifying behaviors
 * based on the DISABLING state, and then cycling the queued tasks through
 * dequeue/enqueue to force global FIFO scheduling.
 *
 * - ops.select_cpu() is ignored and the default select_cpu() is used.
 *
 * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
 *   %SCX_OPS_ENQ_LAST is also ignored.
 *
 * - ops.dispatch() is ignored.
 *
 * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
 *   can't be trusted. Whenever a tick triggers, the running task is rotated to
 *   the tail of the queue with core_sched_at touched.
 *
 * - pick_next_task() suppresses the zero slice warning.
 *
 * - scx_kick_cpu() is disabled to avoid irq_work malfunction during PM
 *   operations.
 *
 * - scx_prio_less() reverts to the default core_sched_at order.
 */
static void scx_bypass(bool bypass)
{
	static DEFINE_RAW_SPINLOCK(bypass_lock);
	static unsigned long bypass_timestamp;
	struct scx_sched *sch;
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&bypass_lock, flags);
	sch = rcu_dereference_bh(scx_root);

	if (bypass) {
		u32 intv_us;

		WRITE_ONCE(scx_bypass_depth, scx_bypass_depth + 1);
		WARN_ON_ONCE(scx_bypass_depth <= 0);
		if (scx_bypass_depth != 1)
			goto unlock;
		WRITE_ONCE(scx_slice_dfl, scx_slice_bypass_us * NSEC_PER_USEC);
		bypass_timestamp = ktime_get_ns();
		if (sch)
			scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1);

		intv_us = READ_ONCE(scx_bypass_lb_intv_us);
		if (intv_us && !timer_pending(&scx_bypass_lb_timer)) {
			scx_bypass_lb_timer.expires =
				jiffies + usecs_to_jiffies(intv_us);
			add_timer_global(&scx_bypass_lb_timer);
		}
	} else {
		WRITE_ONCE(scx_bypass_depth, scx_bypass_depth - 1);
		WARN_ON_ONCE(scx_bypass_depth < 0);
		if (scx_bypass_depth != 0)
			goto unlock;
		WRITE_ONCE(scx_slice_dfl, SCX_SLICE_DFL);
		if (sch)
			scx_add_event(sch, SCX_EV_BYPASS_DURATION,
				      ktime_get_ns() - bypass_timestamp);
	}

	/*
	 * No task property is changing. We just need to make sure all currently
	 * queued tasks are re-queued according to the new scx_rq_bypassing()
	 * state.
As an optimization, walk each rq's runnable_list instead of 4065 * the scx_tasks list. 4066 * 4067 * This function can't trust the scheduler and thus can't use 4068 * cpus_read_lock(). Walk all possible CPUs instead of online. 4069 */ 4070 for_each_possible_cpu(cpu) { 4071 struct rq *rq = cpu_rq(cpu); 4072 struct task_struct *p, *n; 4073 4074 raw_spin_rq_lock(rq); 4075 4076 if (bypass) { 4077 WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING); 4078 rq->scx.flags |= SCX_RQ_BYPASSING; 4079 } else { 4080 WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING)); 4081 rq->scx.flags &= ~SCX_RQ_BYPASSING; 4082 } 4083 4084 /* 4085 * We need to guarantee that no tasks are on the BPF scheduler 4086 * while bypassing. Either we see enabled or the enable path 4087 * sees scx_rq_bypassing() before moving tasks to SCX. 4088 */ 4089 if (!scx_enabled()) { 4090 raw_spin_rq_unlock(rq); 4091 continue; 4092 } 4093 4094 /* 4095 * The use of list_for_each_entry_safe_reverse() is required 4096 * because each task is going to be removed from and added back 4097 * to the runnable_list during iteration. Because they're added 4098 * to the tail of the list, safe reverse iteration can still 4099 * visit all nodes. 4100 */ 4101 list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list, 4102 scx.runnable_node) { 4103 /* cycling deq/enq is enough, see the function comment */ 4104 scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_MOVE) { 4105 /* nothing */ ; 4106 } 4107 } 4108 4109 /* resched to restore ticks and idle state */ 4110 if (cpu_online(cpu) || cpu == smp_processor_id()) 4111 resched_curr(rq); 4112 4113 raw_spin_rq_unlock(rq); 4114 } 4115 4116 unlock: 4117 raw_spin_unlock_irqrestore(&bypass_lock, flags); 4118 } 4119 4120 static void free_exit_info(struct scx_exit_info *ei) 4121 { 4122 kvfree(ei->dump); 4123 kfree(ei->msg); 4124 kfree(ei->bt); 4125 kfree(ei); 4126 } 4127 4128 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len) 4129 { 4130 struct scx_exit_info *ei; 4131 4132 ei = kzalloc(sizeof(*ei), GFP_KERNEL); 4133 if (!ei) 4134 return NULL; 4135 4136 ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL); 4137 ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL); 4138 ei->dump = kvzalloc(exit_dump_len, GFP_KERNEL); 4139 4140 if (!ei->bt || !ei->msg || !ei->dump) { 4141 free_exit_info(ei); 4142 return NULL; 4143 } 4144 4145 return ei; 4146 } 4147 4148 static const char *scx_exit_reason(enum scx_exit_kind kind) 4149 { 4150 switch (kind) { 4151 case SCX_EXIT_UNREG: 4152 return "unregistered from user space"; 4153 case SCX_EXIT_UNREG_BPF: 4154 return "unregistered from BPF"; 4155 case SCX_EXIT_UNREG_KERN: 4156 return "unregistered from the main kernel"; 4157 case SCX_EXIT_SYSRQ: 4158 return "disabled by sysrq-S"; 4159 case SCX_EXIT_ERROR: 4160 return "runtime error"; 4161 case SCX_EXIT_ERROR_BPF: 4162 return "scx_bpf_error"; 4163 case SCX_EXIT_ERROR_STALL: 4164 return "runnable task stall"; 4165 default: 4166 return "<UNKNOWN>"; 4167 } 4168 } 4169 4170 static void free_kick_syncs(void) 4171 { 4172 int cpu; 4173 4174 for_each_possible_cpu(cpu) { 4175 struct scx_kick_syncs **ksyncs = per_cpu_ptr(&scx_kick_syncs, cpu); 4176 struct scx_kick_syncs *to_free; 4177 4178 to_free = rcu_replace_pointer(*ksyncs, NULL, true); 4179 if (to_free) 4180 kvfree_rcu(to_free, rcu); 4181 } 4182 } 4183 4184 static void scx_disable_workfn(struct kthread_work *work) 4185 { 4186 struct scx_sched *sch = container_of(work, struct scx_sched, disable_work); 4187 struct scx_exit_info *ei = sch->exit_info; 4188 struct 
scx_task_iter sti; 4189 struct task_struct *p; 4190 int kind, cpu; 4191 4192 kind = atomic_read(&sch->exit_kind); 4193 while (true) { 4194 if (kind == SCX_EXIT_DONE) /* already disabled? */ 4195 return; 4196 WARN_ON_ONCE(kind == SCX_EXIT_NONE); 4197 if (atomic_try_cmpxchg(&sch->exit_kind, &kind, SCX_EXIT_DONE)) 4198 break; 4199 } 4200 ei->kind = kind; 4201 ei->reason = scx_exit_reason(ei->kind); 4202 4203 /* guarantee forward progress by bypassing scx_ops */ 4204 scx_bypass(true); 4205 WRITE_ONCE(scx_aborting, false); 4206 4207 switch (scx_set_enable_state(SCX_DISABLING)) { 4208 case SCX_DISABLING: 4209 WARN_ONCE(true, "sched_ext: duplicate disabling instance?"); 4210 break; 4211 case SCX_DISABLED: 4212 pr_warn("sched_ext: ops error detected without ops (%s)\n", 4213 sch->exit_info->msg); 4214 WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING); 4215 goto done; 4216 default: 4217 break; 4218 } 4219 4220 /* 4221 * Here, every runnable task is guaranteed to make forward progress and 4222 * we can safely use blocking synchronization constructs. Actually 4223 * disable ops. 4224 */ 4225 mutex_lock(&scx_enable_mutex); 4226 4227 static_branch_disable(&__scx_switched_all); 4228 WRITE_ONCE(scx_switching_all, false); 4229 4230 /* 4231 * Shut down cgroup support before tasks so that the cgroup attach path 4232 * doesn't race against scx_exit_task(). 4233 */ 4234 scx_cgroup_lock(); 4235 scx_cgroup_exit(sch); 4236 scx_cgroup_unlock(); 4237 4238 /* 4239 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones 4240 * must be switched out and exited synchronously. 4241 */ 4242 percpu_down_write(&scx_fork_rwsem); 4243 4244 scx_init_task_enabled = false; 4245 4246 scx_task_iter_start(&sti); 4247 while ((p = scx_task_iter_next_locked(&sti))) { 4248 unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 4249 const struct sched_class *old_class = p->sched_class; 4250 const struct sched_class *new_class = scx_setscheduler_class(p); 4251 4252 update_rq_clock(task_rq(p)); 4253 4254 if (old_class != new_class) 4255 queue_flags |= DEQUEUE_CLASS; 4256 4257 scoped_guard (sched_change, p, queue_flags) { 4258 p->sched_class = new_class; 4259 } 4260 4261 scx_exit_task(p); 4262 } 4263 scx_task_iter_stop(&sti); 4264 percpu_up_write(&scx_fork_rwsem); 4265 4266 /* 4267 * Invalidate all the rq clocks to prevent getting outdated 4268 * rq clocks from a previous scx scheduler. 4269 */ 4270 for_each_possible_cpu(cpu) { 4271 struct rq *rq = cpu_rq(cpu); 4272 scx_rq_clock_invalidate(rq); 4273 } 4274 4275 /* no task is on scx, turn off all the switches and flush in-progress calls */ 4276 static_branch_disable(&__scx_enabled); 4277 bitmap_zero(sch->has_op, SCX_OPI_END); 4278 scx_idle_disable(); 4279 synchronize_rcu(); 4280 4281 if (ei->kind >= SCX_EXIT_ERROR) { 4282 pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n", 4283 sch->ops.name, ei->reason); 4284 4285 if (ei->msg[0] != '\0') 4286 pr_err("sched_ext: %s: %s\n", sch->ops.name, ei->msg); 4287 #ifdef CONFIG_STACKTRACE 4288 stack_trace_print(ei->bt, ei->bt_len, 2); 4289 #endif 4290 } else { 4291 pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n", 4292 sch->ops.name, ei->reason); 4293 } 4294 4295 if (sch->ops.exit) 4296 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, exit, NULL, ei); 4297 4298 cancel_delayed_work_sync(&scx_watchdog_work); 4299 4300 /* 4301 * scx_root clearing must be inside cpus_read_lock(). See 4302 * handle_hotplug(). 
4303 */ 4304 cpus_read_lock(); 4305 RCU_INIT_POINTER(scx_root, NULL); 4306 cpus_read_unlock(); 4307 4308 /* 4309 * Delete the kobject from the hierarchy synchronously. Otherwise, sysfs 4310 * could observe an object of the same name still in the hierarchy when 4311 * the next scheduler is loaded. 4312 */ 4313 kobject_del(&sch->kobj); 4314 4315 free_percpu(scx_dsp_ctx); 4316 scx_dsp_ctx = NULL; 4317 scx_dsp_max_batch = 0; 4318 free_kick_syncs(); 4319 4320 mutex_unlock(&scx_enable_mutex); 4321 4322 WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING); 4323 done: 4324 scx_bypass(false); 4325 } 4326 4327 static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind) 4328 { 4329 int none = SCX_EXIT_NONE; 4330 4331 if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind)) 4332 return false; 4333 4334 /* 4335 * Some CPUs may be trapped in the dispatch paths. Set the aborting 4336 * flag to break potential live-lock scenarios, ensuring we can 4337 * successfully reach scx_bypass(). 4338 */ 4339 WRITE_ONCE(scx_aborting, true); 4340 return true; 4341 } 4342 4343 static void scx_disable(enum scx_exit_kind kind) 4344 { 4345 struct scx_sched *sch; 4346 4347 if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE)) 4348 kind = SCX_EXIT_ERROR; 4349 4350 rcu_read_lock(); 4351 sch = rcu_dereference(scx_root); 4352 if (sch) { 4353 scx_claim_exit(sch, kind); 4354 kthread_queue_work(sch->helper, &sch->disable_work); 4355 } 4356 rcu_read_unlock(); 4357 } 4358 4359 static void dump_newline(struct seq_buf *s) 4360 { 4361 trace_sched_ext_dump(""); 4362 4363 /* @s may be zero sized and seq_buf triggers WARN if so */ 4364 if (s->size) 4365 seq_buf_putc(s, '\n'); 4366 } 4367 4368 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...) 4369 { 4370 va_list args; 4371 4372 #ifdef CONFIG_TRACEPOINTS 4373 if (trace_sched_ext_dump_enabled()) { 4374 /* protected by scx_dump_state()::dump_lock */ 4375 static char line_buf[SCX_EXIT_MSG_LEN]; 4376 4377 va_start(args, fmt); 4378 vscnprintf(line_buf, sizeof(line_buf), fmt, args); 4379 va_end(args); 4380 4381 trace_sched_ext_dump(line_buf); 4382 } 4383 #endif 4384 /* @s may be zero sized and seq_buf triggers WARN if so */ 4385 if (s->size) { 4386 va_start(args, fmt); 4387 seq_buf_vprintf(s, fmt, args); 4388 va_end(args); 4389 4390 seq_buf_putc(s, '\n'); 4391 } 4392 } 4393 4394 static void dump_stack_trace(struct seq_buf *s, const char *prefix, 4395 const unsigned long *bt, unsigned int len) 4396 { 4397 unsigned int i; 4398 4399 for (i = 0; i < len; i++) 4400 dump_line(s, "%s%pS", prefix, (void *)bt[i]); 4401 } 4402 4403 static void ops_dump_init(struct seq_buf *s, const char *prefix) 4404 { 4405 struct scx_dump_data *dd = &scx_dump_data; 4406 4407 lockdep_assert_irqs_disabled(); 4408 4409 dd->cpu = smp_processor_id(); /* allow scx_bpf_dump() */ 4410 dd->first = true; 4411 dd->cursor = 0; 4412 dd->s = s; 4413 dd->prefix = prefix; 4414 } 4415 4416 static void ops_dump_flush(void) 4417 { 4418 struct scx_dump_data *dd = &scx_dump_data; 4419 char *line = dd->buf.line; 4420 4421 if (!dd->cursor) 4422 return; 4423 4424 /* 4425 * There's something to flush and this is the first line. Insert a blank 4426 * line to distinguish ops dump. 4427 */ 4428 if (dd->first) { 4429 dump_newline(dd->s); 4430 dd->first = false; 4431 } 4432 4433 /* 4434 * There may be multiple lines in $line. Scan and emit each line 4435 * separately. 
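	 * For example, a buffer holding "foo\nbar" is emitted as two prefixed
	 * lines, and a final fragment without a trailing newline (possible
	 * when the buffer overflowed) is still emitted as a line of its own.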
4436 */ 4437 while (true) { 4438 char *end = line; 4439 char c; 4440 4441 while (*end != '\n' && *end != '\0') 4442 end++; 4443 4444 /* 4445 * If $line overflowed, it may not have newline at the end. 4446 * Always emit with a newline. 4447 */ 4448 c = *end; 4449 *end = '\0'; 4450 dump_line(dd->s, "%s%s", dd->prefix, line); 4451 if (c == '\0') 4452 break; 4453 4454 /* move to the next line */ 4455 end++; 4456 if (*end == '\0') 4457 break; 4458 line = end; 4459 } 4460 4461 dd->cursor = 0; 4462 } 4463 4464 static void ops_dump_exit(void) 4465 { 4466 ops_dump_flush(); 4467 scx_dump_data.cpu = -1; 4468 } 4469 4470 static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx, 4471 struct task_struct *p, char marker) 4472 { 4473 static unsigned long bt[SCX_EXIT_BT_LEN]; 4474 struct scx_sched *sch = scx_root; 4475 char dsq_id_buf[19] = "(n/a)"; 4476 unsigned long ops_state = atomic_long_read(&p->scx.ops_state); 4477 unsigned int bt_len = 0; 4478 4479 if (p->scx.dsq) 4480 scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx", 4481 (unsigned long long)p->scx.dsq->id); 4482 4483 dump_newline(s); 4484 dump_line(s, " %c%c %s[%d] %+ldms", 4485 marker, task_state_to_char(p), p->comm, p->pid, 4486 jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies)); 4487 dump_line(s, " scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu", 4488 scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK, 4489 p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK, 4490 ops_state >> SCX_OPSS_QSEQ_SHIFT); 4491 dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s", 4492 p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf); 4493 dump_line(s, " dsq_vtime=%llu slice=%llu weight=%u", 4494 p->scx.dsq_vtime, p->scx.slice, p->scx.weight); 4495 dump_line(s, " cpus=%*pb no_mig=%u", cpumask_pr_args(p->cpus_ptr), 4496 p->migration_disabled); 4497 4498 if (SCX_HAS_OP(sch, dump_task)) { 4499 ops_dump_init(s, " "); 4500 SCX_CALL_OP(sch, SCX_KF_REST, dump_task, NULL, dctx, p); 4501 ops_dump_exit(); 4502 } 4503 4504 #ifdef CONFIG_STACKTRACE 4505 bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1); 4506 #endif 4507 if (bt_len) { 4508 dump_newline(s); 4509 dump_stack_trace(s, " ", bt, bt_len); 4510 } 4511 } 4512 4513 static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len) 4514 { 4515 static DEFINE_SPINLOCK(dump_lock); 4516 static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n"; 4517 struct scx_sched *sch = scx_root; 4518 struct scx_dump_ctx dctx = { 4519 .kind = ei->kind, 4520 .exit_code = ei->exit_code, 4521 .reason = ei->reason, 4522 .at_ns = ktime_get_ns(), 4523 .at_jiffies = jiffies, 4524 }; 4525 struct seq_buf s; 4526 struct scx_event_stats events; 4527 unsigned long flags; 4528 char *buf; 4529 int cpu; 4530 4531 spin_lock_irqsave(&dump_lock, flags); 4532 4533 seq_buf_init(&s, ei->dump, dump_len); 4534 4535 if (ei->kind == SCX_EXIT_NONE) { 4536 dump_line(&s, "Debug dump triggered by %s", ei->reason); 4537 } else { 4538 dump_line(&s, "%s[%d] triggered exit kind %d:", 4539 current->comm, current->pid, ei->kind); 4540 dump_line(&s, " %s (%s)", ei->reason, ei->msg); 4541 dump_newline(&s); 4542 dump_line(&s, "Backtrace:"); 4543 dump_stack_trace(&s, " ", ei->bt, ei->bt_len); 4544 } 4545 4546 if (SCX_HAS_OP(sch, dump)) { 4547 ops_dump_init(&s, ""); 4548 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, dump, NULL, &dctx); 4549 ops_dump_exit(); 4550 } 4551 4552 dump_newline(&s); 4553 dump_line(&s, "CPU states"); 4554 dump_line(&s, "----------"); 4555 4556 for_each_possible_cpu(cpu) { 4557 struct rq *rq = cpu_rq(cpu); 4558 
struct rq_flags rf; 4559 struct task_struct *p; 4560 struct seq_buf ns; 4561 size_t avail, used; 4562 bool idle; 4563 4564 rq_lock_irqsave(rq, &rf); 4565 4566 idle = list_empty(&rq->scx.runnable_list) && 4567 rq->curr->sched_class == &idle_sched_class; 4568 4569 if (idle && !SCX_HAS_OP(sch, dump_cpu)) 4570 goto next; 4571 4572 /* 4573 * We don't yet know whether ops.dump_cpu() will produce output 4574 * and we may want to skip the default CPU dump if it doesn't. 4575 * Use a nested seq_buf to generate the standard dump so that we 4576 * can decide whether to commit later. 4577 */ 4578 avail = seq_buf_get_buf(&s, &buf); 4579 seq_buf_init(&ns, buf, avail); 4580 4581 dump_newline(&ns); 4582 dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu ksync=%lu", 4583 cpu, rq->scx.nr_running, rq->scx.flags, 4584 rq->scx.cpu_released, rq->scx.ops_qseq, 4585 rq->scx.kick_sync); 4586 dump_line(&ns, " curr=%s[%d] class=%ps", 4587 rq->curr->comm, rq->curr->pid, 4588 rq->curr->sched_class); 4589 if (!cpumask_empty(rq->scx.cpus_to_kick)) 4590 dump_line(&ns, " cpus_to_kick : %*pb", 4591 cpumask_pr_args(rq->scx.cpus_to_kick)); 4592 if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle)) 4593 dump_line(&ns, " idle_to_kick : %*pb", 4594 cpumask_pr_args(rq->scx.cpus_to_kick_if_idle)); 4595 if (!cpumask_empty(rq->scx.cpus_to_preempt)) 4596 dump_line(&ns, " cpus_to_preempt: %*pb", 4597 cpumask_pr_args(rq->scx.cpus_to_preempt)); 4598 if (!cpumask_empty(rq->scx.cpus_to_wait)) 4599 dump_line(&ns, " cpus_to_wait : %*pb", 4600 cpumask_pr_args(rq->scx.cpus_to_wait)); 4601 4602 used = seq_buf_used(&ns); 4603 if (SCX_HAS_OP(sch, dump_cpu)) { 4604 ops_dump_init(&ns, " "); 4605 SCX_CALL_OP(sch, SCX_KF_REST, dump_cpu, NULL, 4606 &dctx, cpu, idle); 4607 ops_dump_exit(); 4608 } 4609 4610 /* 4611 * If idle && nothing generated by ops.dump_cpu(), there's 4612 * nothing interesting. Skip. 4613 */ 4614 if (idle && used == seq_buf_used(&ns)) 4615 goto next; 4616 4617 /* 4618 * $s may already have overflowed when $ns was created. If so, 4619 * calling commit on it will trigger BUG. 
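		 * (If $s has already overflowed, seq_buf_get_buf() above
		 * returned zero bytes of capacity, which is why the commit
		 * below is gated on @avail.)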
4620 */ 4621 if (avail) { 4622 seq_buf_commit(&s, seq_buf_used(&ns)); 4623 if (seq_buf_has_overflowed(&ns)) 4624 seq_buf_set_overflow(&s); 4625 } 4626 4627 if (rq->curr->sched_class == &ext_sched_class) 4628 scx_dump_task(&s, &dctx, rq->curr, '*'); 4629 4630 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) 4631 scx_dump_task(&s, &dctx, p, ' '); 4632 next: 4633 rq_unlock_irqrestore(rq, &rf); 4634 } 4635 4636 dump_newline(&s); 4637 dump_line(&s, "Event counters"); 4638 dump_line(&s, "--------------"); 4639 4640 scx_read_events(sch, &events); 4641 scx_dump_event(s, &events, SCX_EV_SELECT_CPU_FALLBACK); 4642 scx_dump_event(s, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE); 4643 scx_dump_event(s, &events, SCX_EV_DISPATCH_KEEP_LAST); 4644 scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_EXITING); 4645 scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED); 4646 scx_dump_event(s, &events, SCX_EV_REFILL_SLICE_DFL); 4647 scx_dump_event(s, &events, SCX_EV_BYPASS_DURATION); 4648 scx_dump_event(s, &events, SCX_EV_BYPASS_DISPATCH); 4649 scx_dump_event(s, &events, SCX_EV_BYPASS_ACTIVATE); 4650 4651 if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker)) 4652 memcpy(ei->dump + dump_len - sizeof(trunc_marker), 4653 trunc_marker, sizeof(trunc_marker)); 4654 4655 spin_unlock_irqrestore(&dump_lock, flags); 4656 } 4657 4658 static void scx_error_irq_workfn(struct irq_work *irq_work) 4659 { 4660 struct scx_sched *sch = container_of(irq_work, struct scx_sched, error_irq_work); 4661 struct scx_exit_info *ei = sch->exit_info; 4662 4663 if (ei->kind >= SCX_EXIT_ERROR) 4664 scx_dump_state(ei, sch->ops.exit_dump_len); 4665 4666 kthread_queue_work(sch->helper, &sch->disable_work); 4667 } 4668 4669 static bool scx_vexit(struct scx_sched *sch, 4670 enum scx_exit_kind kind, s64 exit_code, 4671 const char *fmt, va_list args) 4672 { 4673 struct scx_exit_info *ei = sch->exit_info; 4674 4675 if (!scx_claim_exit(sch, kind)) 4676 return false; 4677 4678 ei->exit_code = exit_code; 4679 #ifdef CONFIG_STACKTRACE 4680 if (kind >= SCX_EXIT_ERROR) 4681 ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1); 4682 #endif 4683 vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args); 4684 4685 /* 4686 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again 4687 * in scx_disable_workfn(). 4688 */ 4689 ei->kind = kind; 4690 ei->reason = scx_exit_reason(ei->kind); 4691 4692 irq_work_queue(&sch->error_irq_work); 4693 return true; 4694 } 4695 4696 static int alloc_kick_syncs(void) 4697 { 4698 int cpu; 4699 4700 /* 4701 * Allocate per-CPU arrays sized by nr_cpu_ids. Use kvzalloc as size 4702 * can exceed percpu allocator limits on large machines. 
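	 * For example, with 8192 possible CPUs, each per-CPU array is
	 * 8192 * sizeof(unsigned long) = 64KiB, already larger than a single
	 * percpu allocation can be.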
	 */
	for_each_possible_cpu(cpu) {
		struct scx_kick_syncs **ksyncs = per_cpu_ptr(&scx_kick_syncs, cpu);
		struct scx_kick_syncs *new_ksyncs;

		WARN_ON_ONCE(rcu_access_pointer(*ksyncs));

		new_ksyncs = kvzalloc_node(struct_size(new_ksyncs, syncs, nr_cpu_ids),
					   GFP_KERNEL, cpu_to_node(cpu));
		if (!new_ksyncs) {
			free_kick_syncs();
			return -ENOMEM;
		}

		rcu_assign_pointer(*ksyncs, new_ksyncs);
	}

	return 0;
}

static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
{
	struct scx_sched *sch;
	int node, ret;

	sch = kzalloc(sizeof(*sch), GFP_KERNEL);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	sch->exit_info = alloc_exit_info(ops->exit_dump_len);
	if (!sch->exit_info) {
		ret = -ENOMEM;
		goto err_free_sch;
	}

	ret = rhashtable_init(&sch->dsq_hash, &dsq_hash_params);
	if (ret < 0)
		goto err_free_ei;

	sch->global_dsqs = kcalloc(nr_node_ids, sizeof(sch->global_dsqs[0]),
				   GFP_KERNEL);
	if (!sch->global_dsqs) {
		ret = -ENOMEM;
		goto err_free_hash;
	}

	for_each_node_state(node, N_POSSIBLE) {
		struct scx_dispatch_q *dsq;

		dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node);
		if (!dsq) {
			ret = -ENOMEM;
			goto err_free_gdsqs;
		}

		init_dsq(dsq, SCX_DSQ_GLOBAL);
		sch->global_dsqs[node] = dsq;
	}

	sch->pcpu = alloc_percpu(struct scx_sched_pcpu);
	if (!sch->pcpu) {
		ret = -ENOMEM;
		goto err_free_gdsqs;
	}

	sch->helper = kthread_run_worker(0, "sched_ext_helper");
	if (IS_ERR(sch->helper)) {
		ret = PTR_ERR(sch->helper);
		goto err_free_pcpu;
	}

	sched_set_fifo(sch->helper->task);

	atomic_set(&sch->exit_kind, SCX_EXIT_NONE);
	init_irq_work(&sch->error_irq_work, scx_error_irq_workfn);
	kthread_init_work(&sch->disable_work, scx_disable_workfn);
	sch->ops = *ops;
	ops->priv = sch;

	sch->kobj.kset = scx_kset;
	ret = kobject_init_and_add(&sch->kobj, &scx_ktype, NULL, "root");
	if (ret < 0)
		goto err_stop_helper;

	return sch;

err_stop_helper:
	kthread_stop(sch->helper->task);
err_free_pcpu:
	free_percpu(sch->pcpu);
err_free_gdsqs:
	for_each_node_state(node, N_POSSIBLE)
		kfree(sch->global_dsqs[node]);
	kfree(sch->global_dsqs);
err_free_hash:
	rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL);
err_free_ei:
	free_exit_info(sch->exit_info);
err_free_sch:
	kfree(sch);
	return ERR_PTR(ret);
}

static int check_hotplug_seq(struct scx_sched *sch,
			     const struct sched_ext_ops *ops)
{
	unsigned long long global_hotplug_seq;

	/*
	 * If a hotplug event has occurred between when a scheduler was
	 * initialized and when we were able to attach, exit and notify user
	 * space about it.
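	 * Userspace can read /sys/kernel/sched_ext/hotplug_seq before loading
	 * and pass the value in ops->hotplug_seq. The %SCX_ECODE_ACT_RESTART
	 * exit code below tells the loader that simply restarting the load is
	 * the appropriate response.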
4813 */ 4814 if (ops->hotplug_seq) { 4815 global_hotplug_seq = atomic_long_read(&scx_hotplug_seq); 4816 if (ops->hotplug_seq != global_hotplug_seq) { 4817 scx_exit(sch, SCX_EXIT_UNREG_KERN, 4818 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG, 4819 "expected hotplug seq %llu did not match actual %llu", 4820 ops->hotplug_seq, global_hotplug_seq); 4821 return -EBUSY; 4822 } 4823 } 4824 4825 return 0; 4826 } 4827 4828 static int validate_ops(struct scx_sched *sch, const struct sched_ext_ops *ops) 4829 { 4830 /* 4831 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the 4832 * ops.enqueue() callback isn't implemented. 4833 */ 4834 if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) { 4835 scx_error(sch, "SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented"); 4836 return -EINVAL; 4837 } 4838 4839 /* 4840 * SCX_OPS_BUILTIN_IDLE_PER_NODE requires built-in CPU idle 4841 * selection policy to be enabled. 4842 */ 4843 if ((ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE) && 4844 (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))) { 4845 scx_error(sch, "SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled"); 4846 return -EINVAL; 4847 } 4848 4849 if (ops->flags & SCX_OPS_HAS_CGROUP_WEIGHT) 4850 pr_warn("SCX_OPS_HAS_CGROUP_WEIGHT is deprecated and a noop\n"); 4851 4852 if (ops->cpu_acquire || ops->cpu_release) 4853 pr_warn("ops->cpu_acquire/release() are deprecated, use sched_switch TP instead\n"); 4854 4855 return 0; 4856 } 4857 4858 static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link) 4859 { 4860 struct scx_sched *sch; 4861 struct scx_task_iter sti; 4862 struct task_struct *p; 4863 unsigned long timeout; 4864 int i, cpu, ret; 4865 4866 if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN), 4867 cpu_possible_mask)) { 4868 pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n"); 4869 return -EINVAL; 4870 } 4871 4872 mutex_lock(&scx_enable_mutex); 4873 4874 if (scx_enable_state() != SCX_DISABLED) { 4875 ret = -EBUSY; 4876 goto err_unlock; 4877 } 4878 4879 ret = alloc_kick_syncs(); 4880 if (ret) 4881 goto err_unlock; 4882 4883 sch = scx_alloc_and_add_sched(ops); 4884 if (IS_ERR(sch)) { 4885 ret = PTR_ERR(sch); 4886 goto err_free_ksyncs; 4887 } 4888 4889 /* 4890 * Transition to ENABLING and clear exit info to arm the disable path. 4891 * Failure triggers full disabling from here on. 4892 */ 4893 WARN_ON_ONCE(scx_set_enable_state(SCX_ENABLING) != SCX_DISABLED); 4894 WARN_ON_ONCE(scx_root); 4895 if (WARN_ON_ONCE(READ_ONCE(scx_aborting))) 4896 WRITE_ONCE(scx_aborting, false); 4897 4898 atomic_long_set(&scx_nr_rejected, 0); 4899 4900 for_each_possible_cpu(cpu) 4901 cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE; 4902 4903 /* 4904 * Keep CPUs stable during enable so that the BPF scheduler can track 4905 * online CPUs by watching ->on/offline_cpu() after ->init(). 4906 */ 4907 cpus_read_lock(); 4908 4909 /* 4910 * Make the scheduler instance visible. Must be inside cpus_read_lock(). 4911 * See handle_hotplug(). 
	 */
	rcu_assign_pointer(scx_root, sch);

	scx_idle_enable(ops);

	if (sch->ops.init) {
		ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init, NULL);
		if (ret) {
			ret = ops_sanitize_err(sch, "init", ret);
			cpus_read_unlock();
			scx_error(sch, "ops.init() failed (%d)", ret);
			goto err_disable;
		}
		sch->exit_info->flags |= SCX_EFLAG_INITIALIZED;
	}

	for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
		if (((void (**)(void))ops)[i])
			set_bit(i, sch->has_op);

	ret = check_hotplug_seq(sch, ops);
	if (ret) {
		cpus_read_unlock();
		goto err_disable;
	}
	scx_idle_update_selcpu_topology(ops);

	cpus_read_unlock();

	ret = validate_ops(sch, ops);
	if (ret)
		goto err_disable;

	WARN_ON_ONCE(scx_dsp_ctx);
	scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
	scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
						   scx_dsp_max_batch),
				     __alignof__(struct scx_dsp_ctx));
	if (!scx_dsp_ctx) {
		ret = -ENOMEM;
		goto err_disable;
	}

	if (ops->timeout_ms)
		timeout = msecs_to_jiffies(ops->timeout_ms);
	else
		timeout = SCX_WATCHDOG_MAX_TIMEOUT;

	WRITE_ONCE(scx_watchdog_timeout, timeout);
	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
	queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
			   scx_watchdog_timeout / 2);

	/*
	 * Once __scx_enabled is set, %current can be switched to SCX anytime.
	 * This can lead to stalls as some BPF schedulers (e.g. userspace
	 * scheduling) may not function correctly before all tasks are switched.
	 * Init in bypass mode to guarantee forward progress.
	 */
	scx_bypass(true);

	for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
		if (((void (**)(void))ops)[i])
			set_bit(i, sch->has_op);

	if (sch->ops.cpu_acquire || sch->ops.cpu_release)
		sch->ops.flags |= SCX_OPS_HAS_CPU_PREEMPT;

	/*
	 * Lock out forks, cgroup on/offlining and moves before opening the
	 * floodgate so that they don't wander into the operations prematurely.
	 */
	percpu_down_write(&scx_fork_rwsem);

	WARN_ON_ONCE(scx_init_task_enabled);
	scx_init_task_enabled = true;

	/*
	 * Enable ops for every task. Fork is excluded by scx_fork_rwsem
	 * preventing new tasks from being added. No need to exclude tasks
	 * leaving as sched_ext_free() can handle both prepped and enabled
	 * tasks. Prep all tasks first and then enable them with preemption
	 * disabled.
	 *
	 * All cgroups should be initialized before scx_init_task() so that the
	 * BPF scheduler can reliably track each task's cgroup membership from
	 * scx_init_task(). Lock out cgroup on/offlining and task migrations
	 * while tasks are being initialized so that scx_cgroup_can_attach()
	 * never sees uninitialized tasks.
	 */
	scx_cgroup_lock();
	ret = scx_cgroup_init(sch);
	if (ret)
		goto err_disable_unlock_all;

	scx_task_iter_start(&sti);
	while ((p = scx_task_iter_next_locked(&sti))) {
		/*
		 * @p may already be dead, have lost all its usage counts and
		 * be waiting for an RCU grace period before being freed. @p
		 * can't be initialized for SCX in such cases and should be
		 * ignored.
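		 * tryget_task_struct() below fails exactly when the usage
		 * count has already reached zero, so such tasks are simply
		 * skipped.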
5013 */ 5014 if (!tryget_task_struct(p)) 5015 continue; 5016 5017 scx_task_iter_unlock(&sti); 5018 5019 ret = scx_init_task(p, task_group(p), false); 5020 if (ret) { 5021 put_task_struct(p); 5022 scx_task_iter_stop(&sti); 5023 scx_error(sch, "ops.init_task() failed (%d) for %s[%d]", 5024 ret, p->comm, p->pid); 5025 goto err_disable_unlock_all; 5026 } 5027 5028 scx_set_task_state(p, SCX_TASK_READY); 5029 5030 put_task_struct(p); 5031 } 5032 scx_task_iter_stop(&sti); 5033 scx_cgroup_unlock(); 5034 percpu_up_write(&scx_fork_rwsem); 5035 5036 /* 5037 * All tasks are READY. It's safe to turn on scx_enabled() and switch 5038 * all eligible tasks. 5039 */ 5040 WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL)); 5041 static_branch_enable(&__scx_enabled); 5042 5043 /* 5044 * We're fully committed and can't fail. The task READY -> ENABLED 5045 * transitions here are synchronized against sched_ext_free() through 5046 * scx_tasks_lock. 5047 */ 5048 percpu_down_write(&scx_fork_rwsem); 5049 scx_task_iter_start(&sti); 5050 while ((p = scx_task_iter_next_locked(&sti))) { 5051 unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE; 5052 const struct sched_class *old_class = p->sched_class; 5053 const struct sched_class *new_class = scx_setscheduler_class(p); 5054 5055 if (scx_get_task_state(p) != SCX_TASK_READY) 5056 continue; 5057 5058 if (old_class != new_class) 5059 queue_flags |= DEQUEUE_CLASS; 5060 5061 scoped_guard (sched_change, p, queue_flags) { 5062 p->scx.slice = READ_ONCE(scx_slice_dfl); 5063 p->sched_class = new_class; 5064 } 5065 } 5066 scx_task_iter_stop(&sti); 5067 percpu_up_write(&scx_fork_rwsem); 5068 5069 scx_bypass(false); 5070 5071 if (!scx_tryset_enable_state(SCX_ENABLED, SCX_ENABLING)) { 5072 WARN_ON_ONCE(atomic_read(&sch->exit_kind) == SCX_EXIT_NONE); 5073 goto err_disable; 5074 } 5075 5076 if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL)) 5077 static_branch_enable(&__scx_switched_all); 5078 5079 pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n", 5080 sch->ops.name, scx_switched_all() ? "" : " (partial)"); 5081 kobject_uevent(&sch->kobj, KOBJ_ADD); 5082 mutex_unlock(&scx_enable_mutex); 5083 5084 atomic_long_inc(&scx_enable_seq); 5085 5086 return 0; 5087 5088 err_free_ksyncs: 5089 free_kick_syncs(); 5090 err_unlock: 5091 mutex_unlock(&scx_enable_mutex); 5092 return ret; 5093 5094 err_disable_unlock_all: 5095 scx_cgroup_unlock(); 5096 percpu_up_write(&scx_fork_rwsem); 5097 /* we'll soon enter disable path, keep bypass on */ 5098 err_disable: 5099 mutex_unlock(&scx_enable_mutex); 5100 /* 5101 * Returning an error code here would not pass all the error information 5102 * to userspace. Record errno using scx_error() for cases scx_error() 5103 * wasn't already invoked and exit indicating success so that the error 5104 * is notified through ops.exit() with all the details. 5105 * 5106 * Flush scx_disable_work to ensure that error is reported before init 5107 * completion. sch's base reference will be put by bpf_scx_unreg(). 5108 */ 5109 scx_error(sch, "scx_enable() failed (%d)", ret); 5110 kthread_flush_work(&sch->disable_work); 5111 return 0; 5112 } 5113 5114 5115 /******************************************************************************** 5116 * bpf_struct_ops plumbing. 
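 *
 * This is the glue that lets a BPF program implement struct sched_ext_ops.
 * As an illustrative sketch (not part of this file; the names are made up),
 * a minimal scheduler would declare its ops table along the lines of:
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops minimal_ops = {
 *		.name = "minimal",
 *	};
 *
 * Attaching the resulting struct_ops link lands in bpf_scx_reg() below, which
 * calls scx_enable(); detaching goes through bpf_scx_unreg().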
5117 */ 5118 #include <linux/bpf_verifier.h> 5119 #include <linux/bpf.h> 5120 #include <linux/btf.h> 5121 5122 static const struct btf_type *task_struct_type; 5123 5124 static bool bpf_scx_is_valid_access(int off, int size, 5125 enum bpf_access_type type, 5126 const struct bpf_prog *prog, 5127 struct bpf_insn_access_aux *info) 5128 { 5129 if (type != BPF_READ) 5130 return false; 5131 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) 5132 return false; 5133 if (off % size != 0) 5134 return false; 5135 5136 return btf_ctx_access(off, size, type, prog, info); 5137 } 5138 5139 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log, 5140 const struct bpf_reg_state *reg, int off, 5141 int size) 5142 { 5143 const struct btf_type *t; 5144 5145 t = btf_type_by_id(reg->btf, reg->btf_id); 5146 if (t == task_struct_type) { 5147 if (off >= offsetof(struct task_struct, scx.slice) && 5148 off + size <= offsetofend(struct task_struct, scx.slice)) 5149 return SCALAR_VALUE; 5150 if (off >= offsetof(struct task_struct, scx.dsq_vtime) && 5151 off + size <= offsetofend(struct task_struct, scx.dsq_vtime)) 5152 return SCALAR_VALUE; 5153 if (off >= offsetof(struct task_struct, scx.disallow) && 5154 off + size <= offsetofend(struct task_struct, scx.disallow)) 5155 return SCALAR_VALUE; 5156 } 5157 5158 return -EACCES; 5159 } 5160 5161 static const struct bpf_verifier_ops bpf_scx_verifier_ops = { 5162 .get_func_proto = bpf_base_func_proto, 5163 .is_valid_access = bpf_scx_is_valid_access, 5164 .btf_struct_access = bpf_scx_btf_struct_access, 5165 }; 5166 5167 static int bpf_scx_init_member(const struct btf_type *t, 5168 const struct btf_member *member, 5169 void *kdata, const void *udata) 5170 { 5171 const struct sched_ext_ops *uops = udata; 5172 struct sched_ext_ops *ops = kdata; 5173 u32 moff = __btf_member_bit_offset(t, member) / 8; 5174 int ret; 5175 5176 switch (moff) { 5177 case offsetof(struct sched_ext_ops, dispatch_max_batch): 5178 if (*(u32 *)(udata + moff) > INT_MAX) 5179 return -E2BIG; 5180 ops->dispatch_max_batch = *(u32 *)(udata + moff); 5181 return 1; 5182 case offsetof(struct sched_ext_ops, flags): 5183 if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS) 5184 return -EINVAL; 5185 ops->flags = *(u64 *)(udata + moff); 5186 return 1; 5187 case offsetof(struct sched_ext_ops, name): 5188 ret = bpf_obj_name_cpy(ops->name, uops->name, 5189 sizeof(ops->name)); 5190 if (ret < 0) 5191 return ret; 5192 if (ret == 0) 5193 return -EINVAL; 5194 return 1; 5195 case offsetof(struct sched_ext_ops, timeout_ms): 5196 if (msecs_to_jiffies(*(u32 *)(udata + moff)) > 5197 SCX_WATCHDOG_MAX_TIMEOUT) 5198 return -E2BIG; 5199 ops->timeout_ms = *(u32 *)(udata + moff); 5200 return 1; 5201 case offsetof(struct sched_ext_ops, exit_dump_len): 5202 ops->exit_dump_len = 5203 *(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN; 5204 return 1; 5205 case offsetof(struct sched_ext_ops, hotplug_seq): 5206 ops->hotplug_seq = *(u64 *)(udata + moff); 5207 return 1; 5208 } 5209 5210 return 0; 5211 } 5212 5213 static int bpf_scx_check_member(const struct btf_type *t, 5214 const struct btf_member *member, 5215 const struct bpf_prog *prog) 5216 { 5217 u32 moff = __btf_member_bit_offset(t, member) / 8; 5218 5219 switch (moff) { 5220 case offsetof(struct sched_ext_ops, init_task): 5221 #ifdef CONFIG_EXT_GROUP_SCHED 5222 case offsetof(struct sched_ext_ops, cgroup_init): 5223 case offsetof(struct sched_ext_ops, cgroup_exit): 5224 case offsetof(struct sched_ext_ops, cgroup_prep_move): 5225 #endif 5226 case offsetof(struct sched_ext_ops, 
cpu_online): 5227 case offsetof(struct sched_ext_ops, cpu_offline): 5228 case offsetof(struct sched_ext_ops, init): 5229 case offsetof(struct sched_ext_ops, exit): 5230 break; 5231 default: 5232 if (prog->sleepable) 5233 return -EINVAL; 5234 } 5235 5236 return 0; 5237 } 5238 5239 static int bpf_scx_reg(void *kdata, struct bpf_link *link) 5240 { 5241 return scx_enable(kdata, link); 5242 } 5243 5244 static void bpf_scx_unreg(void *kdata, struct bpf_link *link) 5245 { 5246 struct sched_ext_ops *ops = kdata; 5247 struct scx_sched *sch = ops->priv; 5248 5249 scx_disable(SCX_EXIT_UNREG); 5250 kthread_flush_work(&sch->disable_work); 5251 kobject_put(&sch->kobj); 5252 } 5253 5254 static int bpf_scx_init(struct btf *btf) 5255 { 5256 task_struct_type = btf_type_by_id(btf, btf_tracing_ids[BTF_TRACING_TYPE_TASK]); 5257 5258 return 0; 5259 } 5260 5261 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link) 5262 { 5263 /* 5264 * sched_ext does not support updating the actively-loaded BPF 5265 * scheduler, as registering a BPF scheduler can always fail if the 5266 * scheduler returns an error code for e.g. ops.init(), ops.init_task(), 5267 * etc. Similarly, we can always race with unregistration happening 5268 * elsewhere, such as with sysrq. 5269 */ 5270 return -EOPNOTSUPP; 5271 } 5272 5273 static int bpf_scx_validate(void *kdata) 5274 { 5275 return 0; 5276 } 5277 5278 static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; } 5279 static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {} 5280 static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {} 5281 static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *prev__nullable) {} 5282 static void sched_ext_ops__tick(struct task_struct *p) {} 5283 static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {} 5284 static void sched_ext_ops__running(struct task_struct *p) {} 5285 static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {} 5286 static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {} 5287 static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to__nullable) { return false; } 5288 static bool sched_ext_ops__core_sched_before(struct task_struct *a, struct task_struct *b) { return false; } 5289 static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {} 5290 static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {} 5291 static void sched_ext_ops__update_idle(s32 cpu, bool idle) {} 5292 static void sched_ext_ops__cpu_acquire(s32 cpu, struct scx_cpu_acquire_args *args) {} 5293 static void sched_ext_ops__cpu_release(s32 cpu, struct scx_cpu_release_args *args) {} 5294 static s32 sched_ext_ops__init_task(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; } 5295 static void sched_ext_ops__exit_task(struct task_struct *p, struct scx_exit_task_args *args) {} 5296 static void sched_ext_ops__enable(struct task_struct *p) {} 5297 static void sched_ext_ops__disable(struct task_struct *p) {} 5298 #ifdef CONFIG_EXT_GROUP_SCHED 5299 static s32 sched_ext_ops__cgroup_init(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; } 5300 static void sched_ext_ops__cgroup_exit(struct cgroup *cgrp) {} 5301 static s32 sched_ext_ops__cgroup_prep_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; } 5302 static void sched_ext_ops__cgroup_move(struct 
task_struct *p, struct cgroup *from, struct cgroup *to) {} 5303 static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {} 5304 static void sched_ext_ops__cgroup_set_weight(struct cgroup *cgrp, u32 weight) {} 5305 static void sched_ext_ops__cgroup_set_bandwidth(struct cgroup *cgrp, u64 period_us, u64 quota_us, u64 burst_us) {} 5306 static void sched_ext_ops__cgroup_set_idle(struct cgroup *cgrp, bool idle) {} 5307 #endif 5308 static void sched_ext_ops__cpu_online(s32 cpu) {} 5309 static void sched_ext_ops__cpu_offline(s32 cpu) {} 5310 static s32 sched_ext_ops__init(void) { return -EINVAL; } 5311 static void sched_ext_ops__exit(struct scx_exit_info *info) {} 5312 static void sched_ext_ops__dump(struct scx_dump_ctx *ctx) {} 5313 static void sched_ext_ops__dump_cpu(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {} 5314 static void sched_ext_ops__dump_task(struct scx_dump_ctx *ctx, struct task_struct *p) {} 5315 5316 static struct sched_ext_ops __bpf_ops_sched_ext_ops = { 5317 .select_cpu = sched_ext_ops__select_cpu, 5318 .enqueue = sched_ext_ops__enqueue, 5319 .dequeue = sched_ext_ops__dequeue, 5320 .dispatch = sched_ext_ops__dispatch, 5321 .tick = sched_ext_ops__tick, 5322 .runnable = sched_ext_ops__runnable, 5323 .running = sched_ext_ops__running, 5324 .stopping = sched_ext_ops__stopping, 5325 .quiescent = sched_ext_ops__quiescent, 5326 .yield = sched_ext_ops__yield, 5327 .core_sched_before = sched_ext_ops__core_sched_before, 5328 .set_weight = sched_ext_ops__set_weight, 5329 .set_cpumask = sched_ext_ops__set_cpumask, 5330 .update_idle = sched_ext_ops__update_idle, 5331 .cpu_acquire = sched_ext_ops__cpu_acquire, 5332 .cpu_release = sched_ext_ops__cpu_release, 5333 .init_task = sched_ext_ops__init_task, 5334 .exit_task = sched_ext_ops__exit_task, 5335 .enable = sched_ext_ops__enable, 5336 .disable = sched_ext_ops__disable, 5337 #ifdef CONFIG_EXT_GROUP_SCHED 5338 .cgroup_init = sched_ext_ops__cgroup_init, 5339 .cgroup_exit = sched_ext_ops__cgroup_exit, 5340 .cgroup_prep_move = sched_ext_ops__cgroup_prep_move, 5341 .cgroup_move = sched_ext_ops__cgroup_move, 5342 .cgroup_cancel_move = sched_ext_ops__cgroup_cancel_move, 5343 .cgroup_set_weight = sched_ext_ops__cgroup_set_weight, 5344 .cgroup_set_bandwidth = sched_ext_ops__cgroup_set_bandwidth, 5345 .cgroup_set_idle = sched_ext_ops__cgroup_set_idle, 5346 #endif 5347 .cpu_online = sched_ext_ops__cpu_online, 5348 .cpu_offline = sched_ext_ops__cpu_offline, 5349 .init = sched_ext_ops__init, 5350 .exit = sched_ext_ops__exit, 5351 .dump = sched_ext_ops__dump, 5352 .dump_cpu = sched_ext_ops__dump_cpu, 5353 .dump_task = sched_ext_ops__dump_task, 5354 }; 5355 5356 static struct bpf_struct_ops bpf_sched_ext_ops = { 5357 .verifier_ops = &bpf_scx_verifier_ops, 5358 .reg = bpf_scx_reg, 5359 .unreg = bpf_scx_unreg, 5360 .check_member = bpf_scx_check_member, 5361 .init_member = bpf_scx_init_member, 5362 .init = bpf_scx_init, 5363 .update = bpf_scx_update, 5364 .validate = bpf_scx_validate, 5365 .name = "sched_ext_ops", 5366 .owner = THIS_MODULE, 5367 .cfi_stubs = &__bpf_ops_sched_ext_ops 5368 }; 5369 5370 5371 /******************************************************************************** 5372 * System integration and init. 
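 *
 * Everything below ties sched_ext into the rest of the kernel: sysrq hooks for
 * emergency disable and debug dumps, the CPU kicking machinery used by
 * scx_kick_cpu(), scheduler state reporting via print_scx_info(), the PM
 * notifier, and boot-time initialization.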
5373 */ 5374 5375 static void sysrq_handle_sched_ext_reset(u8 key) 5376 { 5377 scx_disable(SCX_EXIT_SYSRQ); 5378 } 5379 5380 static const struct sysrq_key_op sysrq_sched_ext_reset_op = { 5381 .handler = sysrq_handle_sched_ext_reset, 5382 .help_msg = "reset-sched-ext(S)", 5383 .action_msg = "Disable sched_ext and revert all tasks to CFS", 5384 .enable_mask = SYSRQ_ENABLE_RTNICE, 5385 }; 5386 5387 static void sysrq_handle_sched_ext_dump(u8 key) 5388 { 5389 struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" }; 5390 5391 if (scx_enabled()) 5392 scx_dump_state(&ei, 0); 5393 } 5394 5395 static const struct sysrq_key_op sysrq_sched_ext_dump_op = { 5396 .handler = sysrq_handle_sched_ext_dump, 5397 .help_msg = "dump-sched-ext(D)", 5398 .action_msg = "Trigger sched_ext debug dump", 5399 .enable_mask = SYSRQ_ENABLE_RTNICE, 5400 }; 5401 5402 static bool can_skip_idle_kick(struct rq *rq) 5403 { 5404 lockdep_assert_rq_held(rq); 5405 5406 /* 5407 * We can skip idle kicking if @rq is going to go through at least one 5408 * full SCX scheduling cycle before going idle. Just checking whether 5409 * curr is not idle is insufficient because we could be racing 5410 * balance_one() trying to pull the next task from a remote rq, which 5411 * may fail, and @rq may become idle afterwards. 5412 * 5413 * The race window is small and we don't and can't guarantee that @rq is 5414 * only kicked while idle anyway. Skip only when sure. 5415 */ 5416 return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE); 5417 } 5418 5419 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *ksyncs) 5420 { 5421 struct rq *rq = cpu_rq(cpu); 5422 struct scx_rq *this_scx = &this_rq->scx; 5423 const struct sched_class *cur_class; 5424 bool should_wait = false; 5425 unsigned long flags; 5426 5427 raw_spin_rq_lock_irqsave(rq, flags); 5428 cur_class = rq->curr->sched_class; 5429 5430 /* 5431 * During CPU hotplug, a CPU may depend on kicking itself to make 5432 * forward progress. Allow kicking self regardless of online state. If 5433 * @cpu is running a higher class task, we have no control over @cpu. 5434 * Skip kicking. 
5435 */ 5436 if ((cpu_online(cpu) || cpu == cpu_of(this_rq)) && 5437 !sched_class_above(cur_class, &ext_sched_class)) { 5438 if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) { 5439 if (cur_class == &ext_sched_class) 5440 rq->curr->scx.slice = 0; 5441 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt); 5442 } 5443 5444 if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) { 5445 if (cur_class == &ext_sched_class) { 5446 ksyncs[cpu] = rq->scx.kick_sync; 5447 should_wait = true; 5448 } else { 5449 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait); 5450 } 5451 } 5452 5453 resched_curr(rq); 5454 } else { 5455 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt); 5456 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait); 5457 } 5458 5459 raw_spin_rq_unlock_irqrestore(rq, flags); 5460 5461 return should_wait; 5462 } 5463 5464 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq) 5465 { 5466 struct rq *rq = cpu_rq(cpu); 5467 unsigned long flags; 5468 5469 raw_spin_rq_lock_irqsave(rq, flags); 5470 5471 if (!can_skip_idle_kick(rq) && 5472 (cpu_online(cpu) || cpu == cpu_of(this_rq))) 5473 resched_curr(rq); 5474 5475 raw_spin_rq_unlock_irqrestore(rq, flags); 5476 } 5477 5478 static void kick_cpus_irq_workfn(struct irq_work *irq_work) 5479 { 5480 struct rq *this_rq = this_rq(); 5481 struct scx_rq *this_scx = &this_rq->scx; 5482 struct scx_kick_syncs __rcu *ksyncs_pcpu = __this_cpu_read(scx_kick_syncs); 5483 bool should_wait = false; 5484 unsigned long *ksyncs; 5485 s32 cpu; 5486 5487 if (unlikely(!ksyncs_pcpu)) { 5488 pr_warn_once("kick_cpus_irq_workfn() called with NULL scx_kick_syncs"); 5489 return; 5490 } 5491 5492 ksyncs = rcu_dereference_bh(ksyncs_pcpu)->syncs; 5493 5494 for_each_cpu(cpu, this_scx->cpus_to_kick) { 5495 should_wait |= kick_one_cpu(cpu, this_rq, ksyncs); 5496 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick); 5497 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle); 5498 } 5499 5500 for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) { 5501 kick_one_cpu_if_idle(cpu, this_rq); 5502 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle); 5503 } 5504 5505 if (!should_wait) 5506 return; 5507 5508 for_each_cpu(cpu, this_scx->cpus_to_wait) { 5509 unsigned long *wait_kick_sync = &cpu_rq(cpu)->scx.kick_sync; 5510 5511 /* 5512 * Busy-wait until the task running at the time of kicking is no 5513 * longer running. This can be used to implement e.g. core 5514 * scheduling. 5515 * 5516 * smp_cond_load_acquire() pairs with store_releases in 5517 * pick_task_scx() and put_prev_task_scx(). The former breaks 5518 * the wait if SCX's scheduling path is entered even if the same 5519 * task is picked subsequently. The latter is necessary to break 5520 * the wait when $cpu is taken by a higher sched class. 5521 */ 5522 if (cpu != cpu_of(this_rq)) 5523 smp_cond_load_acquire(wait_kick_sync, VAL != ksyncs[cpu]); 5524 5525 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait); 5526 } 5527 } 5528 5529 /** 5530 * print_scx_info - print out sched_ext scheduler state 5531 * @log_lvl: the log level to use when printing 5532 * @p: target task 5533 * 5534 * If a sched_ext scheduler is enabled, print the name and state of the 5535 * scheduler. If @p is on sched_ext, print further information about the task. 5536 * 5537 * This function can be safely called on any task as long as the task_struct 5538 * itself is accessible. While safe, this function isn't synchronized and may 5539 * print out mixups or garbages of limited length. 
 */
void print_scx_info(const char *log_lvl, struct task_struct *p)
{
	struct scx_sched *sch = scx_root;
	enum scx_enable_state state = scx_enable_state();
	const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
	char runnable_at_buf[22] = "?";
	struct sched_class *class;
	unsigned long runnable_at;

	if (state == SCX_DISABLED)
		return;

	/*
	 * Carefully check if the task was running on sched_ext, and then
	 * carefully copy the time it's been runnable, and its state.
	 */
	if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
	    class != &ext_sched_class) {
		printk("%sSched_ext: %s (%s%s)", log_lvl, sch->ops.name,
		       scx_enable_state_str[state], all);
		return;
	}

	if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
				      sizeof(runnable_at)))
		scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
			  jiffies_delta_msecs(runnable_at, jiffies));

	/* print everything onto one line to conserve console space */
	printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
	       log_lvl, sch->ops.name, scx_enable_state_str[state], all,
	       runnable_at_buf);
}

static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
{
	/*
	 * SCX schedulers often have userspace components which are sometimes
	 * involved in critical scheduling paths. PM operations involve freezing
	 * userspace which can lead to scheduling misbehaviors including stalls.
	 * Let's bypass while PM operations are in progress.
	 */
	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		scx_bypass(true);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
	case PM_POST_RESTORE:
		scx_bypass(false);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block scx_pm_notifier = {
	.notifier_call = scx_pm_handler,
};

void __init init_sched_ext_class(void)
{
	s32 cpu, v;

	/*
	 * The following is to prevent the compiler from optimizing out the enum
	 * definitions so that BPF scheduler implementations can use them
	 * through the generated vmlinux.h.
	 */
	WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
		   SCX_TG_ONLINE);

	scx_idle_init_masks();

	for_each_possible_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
		int n = cpu_to_node(cpu);

		init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
		init_dsq(&rq->scx.bypass_dsq, SCX_DSQ_BYPASS);
		INIT_LIST_HEAD(&rq->scx.runnable_list);
		INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);

		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick, GFP_KERNEL, n));
		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n));
		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n));
		BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n));
		rq->scx.deferred_irq_work = IRQ_WORK_INIT_HARD(deferred_irq_workfn);
		rq->scx.kick_cpus_irq_work = IRQ_WORK_INIT_HARD(kick_cpus_irq_workfn);

		if (cpu_online(cpu))
			cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
	}

	register_sysrq_key('S', &sysrq_sched_ext_reset_op);
	register_sysrq_key('D', &sysrq_sched_ext_dump_op);
	INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
}


/********************************************************************************
 * Helpers that can be called from the BPF scheduler.
 */
static bool scx_dsq_insert_preamble(struct scx_sched *sch, struct task_struct *p,
				    u64 enq_flags)
{
	if (!scx_kf_allowed(sch, SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
		return false;

	lockdep_assert_irqs_disabled();

	if (unlikely(!p)) {
		scx_error(sch, "called with NULL task");
		return false;
	}

	if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
		scx_error(sch, "invalid enq_flags 0x%llx", enq_flags);
		return false;
	}

	return true;
}

static void scx_dsq_insert_commit(struct scx_sched *sch, struct task_struct *p,
				  u64 dsq_id, u64 enq_flags)
{
	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
	struct task_struct *ddsp_task;

	ddsp_task = __this_cpu_read(direct_dispatch_task);
	if (ddsp_task) {
		mark_direct_dispatch(sch, ddsp_task, p, dsq_id, enq_flags);
		return;
	}

	if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
		scx_error(sch, "dispatch buffer overflow");
		return;
	}

	dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
		.task = p,
		.qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
		.dsq_id = dsq_id,
		.enq_flags = enq_flags,
	};
}

__bpf_kfunc_start_defs();

/**
 * scx_bpf_dsq_insert - Insert a task into the FIFO queue of a DSQ
 * @p: task_struct to insert
 * @dsq_id: DSQ to insert into
 * @slice: duration @p can run for in nsecs, 0 to keep the current value
 * @enq_flags: SCX_ENQ_*
 *
 * Insert @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe to
 * call this function spuriously. Can be called from ops.enqueue(),
 * ops.select_cpu(), and ops.dispatch().
 *
 * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
 * and @p must match the task being enqueued.
 *
 * When called from ops.select_cpu(), @enq_flags and @dsq_id are stored, and @p
 * will be directly inserted into the corresponding dispatch queue after
 * ops.select_cpu() returns.
 * If @p is inserted into SCX_DSQ_LOCAL, it will be
 * inserted into the local DSQ of the CPU returned by ops.select_cpu().
 * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
 * task is inserted.
 *
 * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
 * and this function can be called up to ops.dispatch_max_batch times to insert
 * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of remaining
 * slots. scx_bpf_dsq_move_to_local() flushes the batch and resets the
 * counter.
 *
 * This function doesn't have any locking restrictions and may be called under
 * BPF locks (in the future when BPF introduces more flexible locking).
 *
 * @p is allowed to run for @slice. The scheduling path is triggered on slice
 * exhaustion. If zero, the current residual slice is maintained. If
 * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
 * scx_bpf_kick_cpu() to trigger scheduling.
 *
 * Returns %true on successful insertion, %false on failure. On the root
 * scheduler, %false return triggers scheduler abort and the caller doesn't need
 * to check the return value.
 */
__bpf_kfunc bool scx_bpf_dsq_insert___v2(struct task_struct *p, u64 dsq_id,
					 u64 slice, u64 enq_flags)
{
	struct scx_sched *sch;

	guard(rcu)();
	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return false;

	if (!scx_dsq_insert_preamble(sch, p, enq_flags))
		return false;

	if (slice)
		p->scx.slice = slice;
	else
		p->scx.slice = p->scx.slice ?: 1;

	scx_dsq_insert_commit(sch, p, dsq_id, enq_flags);

	return true;
}

/*
 * COMPAT: Will be removed in v6.23 along with the ___v2 suffix.
 */
__bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id,
				    u64 slice, u64 enq_flags)
{
	scx_bpf_dsq_insert___v2(p, dsq_id, slice, enq_flags);
}

static bool scx_dsq_insert_vtime(struct scx_sched *sch, struct task_struct *p,
				 u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags)
{
	if (!scx_dsq_insert_preamble(sch, p, enq_flags))
		return false;

	if (slice)
		p->scx.slice = slice;
	else
		p->scx.slice = p->scx.slice ?: 1;

	p->scx.dsq_vtime = vtime;

	scx_dsq_insert_commit(sch, p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);

	return true;
}

struct scx_bpf_dsq_insert_vtime_args {
	/* @p can't be packed together as KF_RCU is not transitive */
	u64 dsq_id;
	u64 slice;
	u64 vtime;
	u64 enq_flags;
};

/**
 * __scx_bpf_dsq_insert_vtime - Arg-wrapped vtime DSQ insertion
 * @p: task_struct to insert
 * @args: struct containing the rest of the arguments
 * @args->dsq_id: DSQ to insert into
 * @args->slice: duration @p can run for in nsecs, 0 to keep the current value
 * @args->vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
 * @args->enq_flags: SCX_ENQ_*
 *
 * Wrapper kfunc that takes arguments via struct to work around BPF's 5 argument
 * limit. BPF programs should use scx_bpf_dsq_insert_vtime() which is provided
 * as an inline wrapper in common.bpf.h.
 *
 * Insert @p into the vtime priority queue of the DSQ identified by
 * @args->dsq_id. Tasks queued into the priority queue are ordered by
 * @args->vtime.
 * All other aspects are identical to scx_bpf_dsq_insert().
 *
 * @args->vtime ordering is according to time_before64() which considers
 * wrapping. A numerically larger vtime may indicate an earlier position in the
 * ordering and vice-versa.
 *
 * A DSQ can only be used as a FIFO or priority queue at any given time and this
 * function must not be called on a DSQ which already has one or more FIFO tasks
 * queued and vice-versa. Also, the built-in DSQs (SCX_DSQ_LOCAL and
 * SCX_DSQ_GLOBAL) cannot be used as priority queues.
 *
 * Returns %true on successful insertion, %false on failure. On the root
 * scheduler, %false return triggers scheduler abort and the caller doesn't need
 * to check the return value.
 */
__bpf_kfunc bool
__scx_bpf_dsq_insert_vtime(struct task_struct *p,
			   struct scx_bpf_dsq_insert_vtime_args *args)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return false;

	return scx_dsq_insert_vtime(sch, p, args->dsq_id, args->slice,
				    args->vtime, args->enq_flags);
}

/*
 * COMPAT: Will be removed in v6.23.
 */
__bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id,
					  u64 slice, u64 vtime, u64 enq_flags)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return;

	scx_dsq_insert_vtime(sch, p, dsq_id, slice, vtime, enq_flags);
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_dsq_insert___v2, KF_RCU)
BTF_ID_FLAGS(func, __scx_bpf_dsq_insert_vtime, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU)
BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)

static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
	.owner = THIS_MODULE,
	.set = &scx_kfunc_ids_enqueue_dispatch,
};

static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit,
			 struct task_struct *p, u64 dsq_id, u64 enq_flags)
{
	struct scx_sched *sch = scx_root;
	struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
	struct rq *this_rq, *src_rq, *locked_rq;
	bool dispatched = false;
	bool in_balance;
	unsigned long flags;

	if (!scx_kf_allowed_if_unlocked() &&
	    !scx_kf_allowed(sch, SCX_KF_DISPATCH))
		return false;

	/*
	 * If the BPF scheduler keeps calling this function repeatedly, it can
	 * cause similar live-lock conditions as consume_dispatch_q().
	 */
	if (unlikely(READ_ONCE(scx_aborting)))
		return false;

	/*
	 * Can be called from either ops.dispatch() locking this_rq() or any
	 * context where no rq lock is held. In the latter case, lock @p's
	 * task_rq which we'll likely need anyway.
	 */
	src_rq = task_rq(p);

	local_irq_save(flags);
	this_rq = this_rq();
	in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;

	if (in_balance) {
		if (this_rq != src_rq) {
			raw_spin_rq_unlock(this_rq);
			raw_spin_rq_lock(src_rq);
		}
	} else {
		raw_spin_rq_lock(src_rq);
	}

	locked_rq = src_rq;
	raw_spin_lock(&src_dsq->lock);

	/*
	 * Did someone else get to it? @p could have already left $src_dsq, got
	 * re-enqueued, or be in the process of being consumed by someone else.
5915 */ 5916 if (unlikely(p->scx.dsq != src_dsq || 5917 u32_before(kit->cursor.priv, p->scx.dsq_seq) || 5918 p->scx.holding_cpu >= 0) || 5919 WARN_ON_ONCE(src_rq != task_rq(p))) { 5920 raw_spin_unlock(&src_dsq->lock); 5921 goto out; 5922 } 5923 5924 /* @p is still on $src_dsq and stable, determine the destination */ 5925 dst_dsq = find_dsq_for_dispatch(sch, this_rq, dsq_id, p); 5926 5927 /* 5928 * Apply vtime and slice updates before moving so that the new time is 5929 * visible before inserting into $dst_dsq. @p is still on $src_dsq but 5930 * this is safe as we're locking it. 5931 */ 5932 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME) 5933 p->scx.dsq_vtime = kit->vtime; 5934 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE) 5935 p->scx.slice = kit->slice; 5936 5937 /* execute move */ 5938 locked_rq = move_task_between_dsqs(sch, p, enq_flags, src_dsq, dst_dsq); 5939 dispatched = true; 5940 out: 5941 if (in_balance) { 5942 if (this_rq != locked_rq) { 5943 raw_spin_rq_unlock(locked_rq); 5944 raw_spin_rq_lock(this_rq); 5945 } 5946 } else { 5947 raw_spin_rq_unlock_irqrestore(locked_rq, flags); 5948 } 5949 5950 kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE | 5951 __SCX_DSQ_ITER_HAS_VTIME); 5952 return dispatched; 5953 } 5954 5955 __bpf_kfunc_start_defs(); 5956 5957 /** 5958 * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots 5959 * 5960 * Can only be called from ops.dispatch(). 5961 */ 5962 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void) 5963 { 5964 struct scx_sched *sch; 5965 5966 guard(rcu)(); 5967 5968 sch = rcu_dereference(scx_root); 5969 if (unlikely(!sch)) 5970 return 0; 5971 5972 if (!scx_kf_allowed(sch, SCX_KF_DISPATCH)) 5973 return 0; 5974 5975 return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor); 5976 } 5977 5978 /** 5979 * scx_bpf_dispatch_cancel - Cancel the latest dispatch 5980 * 5981 * Cancel the latest dispatch. Can be called multiple times to cancel further 5982 * dispatches. Can only be called from ops.dispatch(). 5983 */ 5984 __bpf_kfunc void scx_bpf_dispatch_cancel(void) 5985 { 5986 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 5987 struct scx_sched *sch; 5988 5989 guard(rcu)(); 5990 5991 sch = rcu_dereference(scx_root); 5992 if (unlikely(!sch)) 5993 return; 5994 5995 if (!scx_kf_allowed(sch, SCX_KF_DISPATCH)) 5996 return; 5997 5998 if (dspc->cursor > 0) 5999 dspc->cursor--; 6000 else 6001 scx_error(sch, "dispatch buffer underflow"); 6002 } 6003 6004 /** 6005 * scx_bpf_dsq_move_to_local - move a task from a DSQ to the current CPU's local DSQ 6006 * @dsq_id: DSQ to move task from 6007 * 6008 * Move a task from the non-local DSQ identified by @dsq_id to the current CPU's 6009 * local DSQ for execution. Can only be called from ops.dispatch(). 6010 * 6011 * This function flushes the in-flight dispatches from scx_bpf_dsq_insert() 6012 * before trying to move from the specified DSQ. It may also grab rq locks and 6013 * thus can't be called under any BPF locks. 6014 * 6015 * Returns %true if a task has been moved, %false if there isn't any task to 6016 * move. 
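 *
 * Illustrative usage sketch (not part of this file; assumes the scx BPF
 * tooling headers which provide BPF_STRUCT_OPS, and a hypothetical custom
 * DSQ %MY_DSQ): a minimal single-queue ops.dispatch() can simply refill the
 * local DSQ from the shared one:
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		scx_bpf_dsq_move_to_local(MY_DSQ);
 *	}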
 */
__bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id)
{
	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
	struct scx_dispatch_q *dsq;
	struct scx_sched *sch;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return false;

	if (!scx_kf_allowed(sch, SCX_KF_DISPATCH))
		return false;

	flush_dispatch_buf(sch, dspc->rq);

	dsq = find_user_dsq(sch, dsq_id);
	if (unlikely(!dsq)) {
		scx_error(sch, "invalid DSQ ID 0x%016llx", dsq_id);
		return false;
	}

	if (consume_dispatch_q(sch, dspc->rq, dsq)) {
		/*
		 * A successfully consumed task can be dequeued before it starts
		 * running while the CPU is trying to migrate other dispatched
		 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
		 * local DSQ.
		 */
		dspc->nr_tasks++;
		return true;
	} else {
		return false;
	}
}

/**
 * scx_bpf_dsq_move_set_slice - Override slice when moving between DSQs
 * @it__iter: DSQ iterator in progress
 * @slice: duration the moved task can run for in nsecs
 *
 * Override the slice of the next task that will be moved from @it__iter using
 * scx_bpf_dsq_move[_vtime](). If this function is not called, the previous
 * slice duration is kept.
 */
__bpf_kfunc void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter,
					    u64 slice)
{
	struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;

	kit->slice = slice;
	kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
}

/**
 * scx_bpf_dsq_move_set_vtime - Override vtime when moving between DSQs
 * @it__iter: DSQ iterator in progress
 * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
 *
 * Override the vtime of the next task that will be moved from @it__iter using
 * scx_bpf_dsq_move_vtime(). If this function is not called, the previous
 * vtime is kept. If scx_bpf_dsq_move() is used to dispatch the next task, the
 * override is ignored and cleared.
 */
__bpf_kfunc void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter,
					    u64 vtime)
{
	struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;

	kit->vtime = vtime;
	kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
}

/**
 * scx_bpf_dsq_move - Move a task from DSQ iteration to a DSQ
 * @it__iter: DSQ iterator in progress
 * @p: task to transfer
 * @dsq_id: DSQ to move @p to
 * @enq_flags: SCX_ENQ_*
 *
 * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ
 * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can
 * be the destination.
 *
 * For the transfer to be successful, @p must still be on the DSQ and have been
 * queued before the DSQ iteration started. This function doesn't care whether
 * @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have
 * been queued before the iteration started.
 *
 * @p's slice is kept by default. Use scx_bpf_dsq_move_set_slice() to update.
 *
 * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
 * lock (e.g. BPF timers or SYSCALL programs).
 *
 * Returns %true if @p has been consumed, %false if @p had already been
 * consumed, dequeued, or, for sub-scheds, @dsq_id points to a disallowed local
 * DSQ.
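 *
 * Usage sketch (assumes the scx tooling's bpf_for_each() DSQ iterator and
 * BPF_FOR_EACH_ITER convenience macros, and a hypothetical %MY_DSQ): move
 * the first task allowed on @cpu to the local DSQ:
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, MY_DSQ, 0) {
 *		if (bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) {
 *			scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p, SCX_DSQ_LOCAL, 0);
 *			break;
 *		}
 *	}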
6116 */ 6117 __bpf_kfunc bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter, 6118 struct task_struct *p, u64 dsq_id, 6119 u64 enq_flags) 6120 { 6121 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter, 6122 p, dsq_id, enq_flags); 6123 } 6124 6125 /** 6126 * scx_bpf_dsq_move_vtime - Move a task from DSQ iteration to a PRIQ DSQ 6127 * @it__iter: DSQ iterator in progress 6128 * @p: task to transfer 6129 * @dsq_id: DSQ to move @p to 6130 * @enq_flags: SCX_ENQ_* 6131 * 6132 * Transfer @p which is on the DSQ currently iterated by @it__iter to the 6133 * priority queue of the DSQ specified by @dsq_id. The destination must be a 6134 * user DSQ as only user DSQs support priority queue. 6135 * 6136 * @p's slice and vtime are kept by default. Use scx_bpf_dsq_move_set_slice() 6137 * and scx_bpf_dsq_move_set_vtime() to update. 6138 * 6139 * All other aspects are identical to scx_bpf_dsq_move(). See 6140 * scx_bpf_dsq_insert_vtime() for more information on @vtime. 6141 */ 6142 __bpf_kfunc bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter, 6143 struct task_struct *p, u64 dsq_id, 6144 u64 enq_flags) 6145 { 6146 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter, 6147 p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); 6148 } 6149 6150 __bpf_kfunc_end_defs(); 6151 6152 BTF_KFUNCS_START(scx_kfunc_ids_dispatch) 6153 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots) 6154 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel) 6155 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local) 6156 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU) 6157 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU) 6158 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU) 6159 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU) 6160 BTF_KFUNCS_END(scx_kfunc_ids_dispatch) 6161 6162 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = { 6163 .owner = THIS_MODULE, 6164 .set = &scx_kfunc_ids_dispatch, 6165 }; 6166 6167 static u32 reenq_local(struct rq *rq) 6168 { 6169 LIST_HEAD(tasks); 6170 u32 nr_enqueued = 0; 6171 struct task_struct *p, *n; 6172 6173 lockdep_assert_rq_held(rq); 6174 6175 /* 6176 * The BPF scheduler may choose to dispatch tasks back to 6177 * @rq->scx.local_dsq. Move all candidate tasks off to a private list 6178 * first to avoid processing the same tasks repeatedly. 6179 */ 6180 list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list, 6181 scx.dsq_list.node) { 6182 /* 6183 * If @p is being migrated, @p's current CPU may not agree with 6184 * its allowed CPUs and the migration_cpu_stop is about to 6185 * deactivate and re-activate @p anyway. Skip re-enqueueing. 6186 * 6187 * While racing sched property changes may also dequeue and 6188 * re-enqueue a migrating task while its current CPU and allowed 6189 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to 6190 * the current local DSQ for running tasks and thus are not 6191 * visible to the BPF scheduler. 6192 */ 6193 if (p->migration_pending) 6194 continue; 6195 6196 dispatch_dequeue(rq, p); 6197 list_add_tail(&p->scx.dsq_list.node, &tasks); 6198 } 6199 6200 list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) { 6201 list_del_init(&p->scx.dsq_list.node); 6202 do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1); 6203 nr_enqueued++; 6204 } 6205 6206 return nr_enqueued; 6207 } 6208 6209 __bpf_kfunc_start_defs(); 6210 6211 /** 6212 * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ 6213 * 6214 * Iterate over all of the tasks currently enqueued on the local DSQ of the 6215 * caller's CPU, and re-enqueue them in the BPF scheduler. 
Returns the number of 6216 * processed tasks. Can only be called from ops.cpu_release(). 6217 * 6218 * COMPAT: Will be removed in v6.23 along with the ___v2 suffix on the void 6219 * returning variant that can be called from anywhere. 6220 */ 6221 __bpf_kfunc u32 scx_bpf_reenqueue_local(void) 6222 { 6223 struct scx_sched *sch; 6224 struct rq *rq; 6225 6226 guard(rcu)(); 6227 sch = rcu_dereference(scx_root); 6228 if (unlikely(!sch)) 6229 return 0; 6230 6231 if (!scx_kf_allowed(sch, SCX_KF_CPU_RELEASE)) 6232 return 0; 6233 6234 rq = cpu_rq(smp_processor_id()); 6235 lockdep_assert_rq_held(rq); 6236 6237 return reenq_local(rq); 6238 } 6239 6240 __bpf_kfunc_end_defs(); 6241 6242 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release) 6243 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local) 6244 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release) 6245 6246 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = { 6247 .owner = THIS_MODULE, 6248 .set = &scx_kfunc_ids_cpu_release, 6249 }; 6250 6251 __bpf_kfunc_start_defs(); 6252 6253 /** 6254 * scx_bpf_create_dsq - Create a custom DSQ 6255 * @dsq_id: DSQ to create 6256 * @node: NUMA node to allocate from 6257 * 6258 * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable 6259 * scx callback, and any BPF_PROG_TYPE_SYSCALL prog. 6260 */ 6261 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) 6262 { 6263 struct scx_dispatch_q *dsq; 6264 struct scx_sched *sch; 6265 s32 ret; 6266 6267 if (unlikely(node >= (int)nr_node_ids || 6268 (node < 0 && node != NUMA_NO_NODE))) 6269 return -EINVAL; 6270 6271 if (unlikely(dsq_id & SCX_DSQ_FLAG_BUILTIN)) 6272 return -EINVAL; 6273 6274 dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node); 6275 if (!dsq) 6276 return -ENOMEM; 6277 6278 init_dsq(dsq, dsq_id); 6279 6280 rcu_read_lock(); 6281 6282 sch = rcu_dereference(scx_root); 6283 if (sch) 6284 ret = rhashtable_lookup_insert_fast(&sch->dsq_hash, &dsq->hash_node, 6285 dsq_hash_params); 6286 else 6287 ret = -ENODEV; 6288 6289 rcu_read_unlock(); 6290 if (ret) 6291 kfree(dsq); 6292 return ret; 6293 } 6294 6295 __bpf_kfunc_end_defs(); 6296 6297 BTF_KFUNCS_START(scx_kfunc_ids_unlocked) 6298 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE) 6299 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU) 6300 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU) 6301 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU) 6302 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU) 6303 BTF_KFUNCS_END(scx_kfunc_ids_unlocked) 6304 6305 static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = { 6306 .owner = THIS_MODULE, 6307 .set = &scx_kfunc_ids_unlocked, 6308 }; 6309 6310 __bpf_kfunc_start_defs(); 6311 6312 /** 6313 * scx_bpf_task_set_slice - Set task's time slice 6314 * @p: task of interest 6315 * @slice: time slice to set in nsecs 6316 * 6317 * Set @p's time slice to @slice. Returns %true on success, %false if the 6318 * calling scheduler doesn't have authority over @p. 6319 */ 6320 __bpf_kfunc bool scx_bpf_task_set_slice(struct task_struct *p, u64 slice) 6321 { 6322 p->scx.slice = slice; 6323 return true; 6324 } 6325 6326 /** 6327 * scx_bpf_task_set_dsq_vtime - Set task's virtual time for DSQ ordering 6328 * @p: task of interest 6329 * @vtime: virtual time to set 6330 * 6331 * Set @p's virtual time to @vtime. Returns %true on success, %false if the 6332 * calling scheduler doesn't have authority over @p. 
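 *
 * For example, a sketch of the weighted-vtime pattern used by the sample
 * schedulers (not code from this file): ops.stopping() can charge the
 * consumed slice inversely proportional to the task's weight:
 *
 *	u64 used = SCX_SLICE_DFL - p->scx.slice;
 *
 *	scx_bpf_task_set_dsq_vtime(p, p->scx.dsq_vtime + used * 100 / p->scx.weight);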
6333 */ 6334 __bpf_kfunc bool scx_bpf_task_set_dsq_vtime(struct task_struct *p, u64 vtime) 6335 { 6336 p->scx.dsq_vtime = vtime; 6337 return true; 6338 } 6339 6340 static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags) 6341 { 6342 struct rq *this_rq; 6343 unsigned long irq_flags; 6344 6345 if (!ops_cpu_valid(sch, cpu, NULL)) 6346 return; 6347 6348 local_irq_save(irq_flags); 6349 6350 this_rq = this_rq(); 6351 6352 /* 6353 * While bypassing for PM ops, IRQ handling may not be online which can 6354 * lead to irq_work_queue() malfunction such as infinite busy wait for 6355 * IRQ status update. Suppress kicking. 6356 */ 6357 if (scx_rq_bypassing(this_rq)) 6358 goto out; 6359 6360 /* 6361 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting 6362 * rq locks. We can probably be smarter and avoid bouncing if called 6363 * from ops which don't hold a rq lock. 6364 */ 6365 if (flags & SCX_KICK_IDLE) { 6366 struct rq *target_rq = cpu_rq(cpu); 6367 6368 if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT))) 6369 scx_error(sch, "PREEMPT/WAIT cannot be used with SCX_KICK_IDLE"); 6370 6371 if (raw_spin_rq_trylock(target_rq)) { 6372 if (can_skip_idle_kick(target_rq)) { 6373 raw_spin_rq_unlock(target_rq); 6374 goto out; 6375 } 6376 raw_spin_rq_unlock(target_rq); 6377 } 6378 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle); 6379 } else { 6380 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick); 6381 6382 if (flags & SCX_KICK_PREEMPT) 6383 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt); 6384 if (flags & SCX_KICK_WAIT) 6385 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait); 6386 } 6387 6388 irq_work_queue(&this_rq->scx.kick_cpus_irq_work); 6389 out: 6390 local_irq_restore(irq_flags); 6391 } 6392 6393 /** 6394 * scx_bpf_kick_cpu - Trigger reschedule on a CPU 6395 * @cpu: cpu to kick 6396 * @flags: %SCX_KICK_* flags 6397 * 6398 * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or 6399 * trigger rescheduling on a busy CPU. This can be called from any online 6400 * scx_ops operation and the actual kicking is performed asynchronously through 6401 * an irq work. 6402 */ 6403 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags) 6404 { 6405 struct scx_sched *sch; 6406 6407 guard(rcu)(); 6408 sch = rcu_dereference(scx_root); 6409 if (likely(sch)) 6410 scx_kick_cpu(sch, cpu, flags); 6411 } 6412 6413 /** 6414 * scx_bpf_dsq_nr_queued - Return the number of queued tasks 6415 * @dsq_id: id of the DSQ 6416 * 6417 * Return the number of tasks in the DSQ matching @dsq_id. If not found, 6418 * -%ENOENT is returned. 
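 *
 * Usage sketch (hypothetical %MY_DSQ): only wake an idle CPU when there is
 * queued work for it to pull:
 *
 *	if (scx_bpf_dsq_nr_queued(MY_DSQ) > 0)
 *		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);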
6419 */ 6420 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id) 6421 { 6422 struct scx_sched *sch; 6423 struct scx_dispatch_q *dsq; 6424 s32 ret; 6425 6426 preempt_disable(); 6427 6428 sch = rcu_dereference_sched(scx_root); 6429 if (unlikely(!sch)) { 6430 ret = -ENODEV; 6431 goto out; 6432 } 6433 6434 if (dsq_id == SCX_DSQ_LOCAL) { 6435 ret = READ_ONCE(this_rq()->scx.local_dsq.nr); 6436 goto out; 6437 } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) { 6438 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK; 6439 6440 if (ops_cpu_valid(sch, cpu, NULL)) { 6441 ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr); 6442 goto out; 6443 } 6444 } else { 6445 dsq = find_user_dsq(sch, dsq_id); 6446 if (dsq) { 6447 ret = READ_ONCE(dsq->nr); 6448 goto out; 6449 } 6450 } 6451 ret = -ENOENT; 6452 out: 6453 preempt_enable(); 6454 return ret; 6455 } 6456 6457 /** 6458 * scx_bpf_destroy_dsq - Destroy a custom DSQ 6459 * @dsq_id: DSQ to destroy 6460 * 6461 * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with 6462 * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is 6463 * empty and no further tasks are dispatched to it. Ignored if called on a DSQ 6464 * which doesn't exist. Can be called from any online scx_ops operations. 6465 */ 6466 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id) 6467 { 6468 struct scx_sched *sch; 6469 6470 rcu_read_lock(); 6471 sch = rcu_dereference(scx_root); 6472 if (sch) 6473 destroy_dsq(sch, dsq_id); 6474 rcu_read_unlock(); 6475 } 6476 6477 /** 6478 * bpf_iter_scx_dsq_new - Create a DSQ iterator 6479 * @it: iterator to initialize 6480 * @dsq_id: DSQ to iterate 6481 * @flags: %SCX_DSQ_ITER_* 6482 * 6483 * Initialize BPF iterator @it which can be used with bpf_for_each() to walk 6484 * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes 6485 * tasks which are already queued when this function is invoked. 6486 */ 6487 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, 6488 u64 flags) 6489 { 6490 struct bpf_iter_scx_dsq_kern *kit = (void *)it; 6491 struct scx_sched *sch; 6492 6493 BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) > 6494 sizeof(struct bpf_iter_scx_dsq)); 6495 BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) != 6496 __alignof__(struct bpf_iter_scx_dsq)); 6497 BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS & 6498 ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1)); 6499 6500 /* 6501 * next() and destroy() will be called regardless of the return value. 6502 * Always clear $kit->dsq. 6503 */ 6504 kit->dsq = NULL; 6505 6506 sch = rcu_dereference_check(scx_root, rcu_read_lock_bh_held()); 6507 if (unlikely(!sch)) 6508 return -ENODEV; 6509 6510 if (flags & ~__SCX_DSQ_ITER_USER_FLAGS) 6511 return -EINVAL; 6512 6513 kit->dsq = find_user_dsq(sch, dsq_id); 6514 if (!kit->dsq) 6515 return -ENOENT; 6516 6517 kit->cursor = INIT_DSQ_LIST_CURSOR(kit->cursor, flags, 6518 READ_ONCE(kit->dsq->seq)); 6519 6520 return 0; 6521 } 6522 6523 /** 6524 * bpf_iter_scx_dsq_next - Progress a DSQ iterator 6525 * @it: iterator to progress 6526 * 6527 * Return the next task. See bpf_iter_scx_dsq_new(). 
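 *
 * BPF programs usually drive this iterator through the scx tooling's
 * bpf_for_each() macro rather than calling it directly. An equivalent
 * open-coded sketch (hypothetical %MY_DSQ):
 *
 *	struct bpf_iter_scx_dsq it;
 *	struct task_struct *p;
 *
 *	if (!bpf_iter_scx_dsq_new(&it, MY_DSQ, 0)) {
 *		while ((p = bpf_iter_scx_dsq_next(&it)))
 *			bpf_printk("queued: %s", p->comm);
 *		bpf_iter_scx_dsq_destroy(&it);
 *	}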
 */
__bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
{
	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
	bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV;
	struct task_struct *p;
	unsigned long flags;

	if (!kit->dsq)
		return NULL;

	raw_spin_lock_irqsave(&kit->dsq->lock, flags);

	if (list_empty(&kit->cursor.node))
		p = NULL;
	else
		p = container_of(&kit->cursor, struct task_struct, scx.dsq_list);

	/*
	 * Only tasks which were queued before the iteration started are
	 * visible. This bounds BPF iterations and guarantees that vtime never
	 * jumps in the other direction while iterating.
	 */
	do {
		p = nldsq_next_task(kit->dsq, p, rev);
	} while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq)));

	if (p) {
		if (rev)
			list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node);
		else
			list_move(&kit->cursor.node, &p->scx.dsq_list.node);
	} else {
		list_del_init(&kit->cursor.node);
	}

	raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);

	return p;
}

/**
 * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
 * @it: iterator to destroy
 *
 * Undo bpf_iter_scx_dsq_new().
 */
__bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
{
	struct bpf_iter_scx_dsq_kern *kit = (void *)it;

	if (!kit->dsq)
		return;

	if (!list_empty(&kit->cursor.node)) {
		unsigned long flags;

		raw_spin_lock_irqsave(&kit->dsq->lock, flags);
		list_del_init(&kit->cursor.node);
		raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
	}
	kit->dsq = NULL;
}

/**
 * scx_bpf_dsq_peek - Lockless peek at the first element of a DSQ
 * @dsq_id: DSQ to examine
 *
 * Read the first element in the DSQ. This is semantically equivalent to using
 * the DSQ iterator, but is lock-free. Of course, like any lockless operation,
 * this provides only a point-in-time snapshot, and the contents may change
 * by the time any subsequent locking operation reads the queue.
 *
 * Returns the pointer to the first task, or NULL on an empty queue or an
 * internal error.
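 *
 * Usage sketch (the call must run inside an RCU read-side critical section,
 * e.g. via the bpf_rcu_read_lock() kfunc; %MY_DSQ is hypothetical):
 *
 *	struct task_struct *p;
 *
 *	bpf_rcu_read_lock();
 *	p = scx_bpf_dsq_peek(MY_DSQ);
 *	if (p)
 *		bpf_printk("head: %s", p->comm);
 *	bpf_rcu_read_unlock();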
6602 */ 6603 __bpf_kfunc struct task_struct *scx_bpf_dsq_peek(u64 dsq_id) 6604 { 6605 struct scx_sched *sch; 6606 struct scx_dispatch_q *dsq; 6607 6608 sch = rcu_dereference(scx_root); 6609 if (unlikely(!sch)) 6610 return NULL; 6611 6612 if (unlikely(dsq_id & SCX_DSQ_FLAG_BUILTIN)) { 6613 scx_error(sch, "peek disallowed on builtin DSQ 0x%llx", dsq_id); 6614 return NULL; 6615 } 6616 6617 dsq = find_user_dsq(sch, dsq_id); 6618 if (unlikely(!dsq)) { 6619 scx_error(sch, "peek on non-existent DSQ 0x%llx", dsq_id); 6620 return NULL; 6621 } 6622 6623 return rcu_dereference(dsq->first_task); 6624 } 6625 6626 __bpf_kfunc_end_defs(); 6627 6628 static s32 __bstr_format(struct scx_sched *sch, u64 *data_buf, char *line_buf, 6629 size_t line_size, char *fmt, unsigned long long *data, 6630 u32 data__sz) 6631 { 6632 struct bpf_bprintf_data bprintf_data = { .get_bin_args = true }; 6633 s32 ret; 6634 6635 if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 || 6636 (data__sz && !data)) { 6637 scx_error(sch, "invalid data=%p and data__sz=%u", (void *)data, data__sz); 6638 return -EINVAL; 6639 } 6640 6641 ret = copy_from_kernel_nofault(data_buf, data, data__sz); 6642 if (ret < 0) { 6643 scx_error(sch, "failed to read data fields (%d)", ret); 6644 return ret; 6645 } 6646 6647 ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8, 6648 &bprintf_data); 6649 if (ret < 0) { 6650 scx_error(sch, "format preparation failed (%d)", ret); 6651 return ret; 6652 } 6653 6654 ret = bstr_printf(line_buf, line_size, fmt, 6655 bprintf_data.bin_args); 6656 bpf_bprintf_cleanup(&bprintf_data); 6657 if (ret < 0) { 6658 scx_error(sch, "(\"%s\", %p, %u) failed to format", fmt, data, data__sz); 6659 return ret; 6660 } 6661 6662 return ret; 6663 } 6664 6665 static s32 bstr_format(struct scx_sched *sch, struct scx_bstr_buf *buf, 6666 char *fmt, unsigned long long *data, u32 data__sz) 6667 { 6668 return __bstr_format(sch, buf->data, buf->line, sizeof(buf->line), 6669 fmt, data, data__sz); 6670 } 6671 6672 __bpf_kfunc_start_defs(); 6673 6674 /** 6675 * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler. 6676 * @exit_code: Exit value to pass to user space via struct scx_exit_info. 6677 * @fmt: error message format string 6678 * @data: format string parameters packaged using ___bpf_fill() macro 6679 * @data__sz: @data len, must end in '__sz' for the verifier 6680 * 6681 * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops 6682 * disabling. 6683 */ 6684 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt, 6685 unsigned long long *data, u32 data__sz) 6686 { 6687 struct scx_sched *sch; 6688 unsigned long flags; 6689 6690 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags); 6691 sch = rcu_dereference_bh(scx_root); 6692 if (likely(sch) && 6693 bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0) 6694 scx_exit(sch, SCX_EXIT_UNREG_BPF, exit_code, "%s", scx_exit_bstr_buf.line); 6695 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags); 6696 } 6697 6698 /** 6699 * scx_bpf_error_bstr - Indicate fatal error 6700 * @fmt: error message format string 6701 * @data: format string parameters packaged using ___bpf_fill() macro 6702 * @data__sz: @data len, must end in '__sz' for the verifier 6703 * 6704 * Indicate that the BPF scheduler encountered a fatal error and initiate ops 6705 * disabling. 
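 *
 * BPF schedulers normally reach this kfunc through a convenience wrapper
 * such as the scx tooling's scx_bpf_error() macro, which packages the
 * varargs via ___bpf_fill(). A sketch:
 *
 *	if (cpu < 0)
 *		scx_bpf_error("invalid CPU %d", cpu);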
6706 */ 6707 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data, 6708 u32 data__sz) 6709 { 6710 struct scx_sched *sch; 6711 unsigned long flags; 6712 6713 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags); 6714 sch = rcu_dereference_bh(scx_root); 6715 if (likely(sch) && 6716 bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0) 6717 scx_exit(sch, SCX_EXIT_ERROR_BPF, 0, "%s", scx_exit_bstr_buf.line); 6718 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags); 6719 } 6720 6721 /** 6722 * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler 6723 * @fmt: format string 6724 * @data: format string parameters packaged using ___bpf_fill() macro 6725 * @data__sz: @data len, must end in '__sz' for the verifier 6726 * 6727 * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and 6728 * dump_task() to generate extra debug dump specific to the BPF scheduler. 6729 * 6730 * The extra dump may be multiple lines. A single line may be split over 6731 * multiple calls. The last line is automatically terminated. 6732 */ 6733 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data, 6734 u32 data__sz) 6735 { 6736 struct scx_sched *sch; 6737 struct scx_dump_data *dd = &scx_dump_data; 6738 struct scx_bstr_buf *buf = &dd->buf; 6739 s32 ret; 6740 6741 guard(rcu)(); 6742 6743 sch = rcu_dereference(scx_root); 6744 if (unlikely(!sch)) 6745 return; 6746 6747 if (raw_smp_processor_id() != dd->cpu) { 6748 scx_error(sch, "scx_bpf_dump() must only be called from ops.dump() and friends"); 6749 return; 6750 } 6751 6752 /* append the formatted string to the line buf */ 6753 ret = __bstr_format(sch, buf->data, buf->line + dd->cursor, 6754 sizeof(buf->line) - dd->cursor, fmt, data, data__sz); 6755 if (ret < 0) { 6756 dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)", 6757 dd->prefix, fmt, data, data__sz, ret); 6758 return; 6759 } 6760 6761 dd->cursor += ret; 6762 dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line)); 6763 6764 if (!dd->cursor) 6765 return; 6766 6767 /* 6768 * If the line buf overflowed or ends in a newline, flush it into the 6769 * dump. This is to allow the caller to generate a single line over 6770 * multiple calls. As ops_dump_flush() can also handle multiple lines in 6771 * the line buf, the only case which can lead to an unexpected 6772 * truncation is when the caller keeps generating newlines in the middle 6773 * instead of the end consecutively. Don't do that. 6774 */ 6775 if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n') 6776 ops_dump_flush(); 6777 } 6778 6779 /** 6780 * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ 6781 * 6782 * Iterate over all of the tasks currently enqueued on the local DSQ of the 6783 * caller's CPU, and re-enqueue them in the BPF scheduler. Can be called from 6784 * anywhere. 6785 */ 6786 __bpf_kfunc void scx_bpf_reenqueue_local___v2(void) 6787 { 6788 struct rq *rq; 6789 6790 guard(preempt)(); 6791 6792 rq = this_rq(); 6793 local_set(&rq->scx.reenq_local_deferred, 1); 6794 schedule_deferred(rq); 6795 } 6796 6797 /** 6798 * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU 6799 * @cpu: CPU of interest 6800 * 6801 * Return the maximum relative capacity of @cpu in relation to the most 6802 * performant CPU in the system. The return value is in the range [1, 6803 * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur(). 
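 *
 * For instance (sketch), combining this with scx_bpf_cpuperf_cur() as
 * described there yields @cpu's current performance relative to the most
 * performant CPU in the system:
 *
 *	u64 cur = (u64)scx_bpf_cpuperf_cap(cpu) * scx_bpf_cpuperf_cur(cpu) /
 *		  SCX_CPUPERF_ONE;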
6804 */ 6805 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu) 6806 { 6807 struct scx_sched *sch; 6808 6809 guard(rcu)(); 6810 6811 sch = rcu_dereference(scx_root); 6812 if (likely(sch) && ops_cpu_valid(sch, cpu, NULL)) 6813 return arch_scale_cpu_capacity(cpu); 6814 else 6815 return SCX_CPUPERF_ONE; 6816 } 6817 6818 /** 6819 * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU 6820 * @cpu: CPU of interest 6821 * 6822 * Return the current relative performance of @cpu in relation to its maximum. 6823 * The return value is in the range [1, %SCX_CPUPERF_ONE]. 6824 * 6825 * The current performance level of a CPU in relation to the maximum performance 6826 * available in the system can be calculated as follows: 6827 * 6828 * scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE 6829 * 6830 * The result is in the range [1, %SCX_CPUPERF_ONE]. 6831 */ 6832 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu) 6833 { 6834 struct scx_sched *sch; 6835 6836 guard(rcu)(); 6837 6838 sch = rcu_dereference(scx_root); 6839 if (likely(sch) && ops_cpu_valid(sch, cpu, NULL)) 6840 return arch_scale_freq_capacity(cpu); 6841 else 6842 return SCX_CPUPERF_ONE; 6843 } 6844 6845 /** 6846 * scx_bpf_cpuperf_set - Set the relative performance target of a CPU 6847 * @cpu: CPU of interest 6848 * @perf: target performance level [0, %SCX_CPUPERF_ONE] 6849 * 6850 * Set the target performance level of @cpu to @perf. @perf is in linear 6851 * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the 6852 * schedutil cpufreq governor chooses the target frequency. 6853 * 6854 * The actual performance level chosen, CPU grouping, and the overhead and 6855 * latency of the operations are dependent on the hardware and cpufreq driver in 6856 * use. Consult hardware and cpufreq documentation for more information. The 6857 * current performance level can be monitored using scx_bpf_cpuperf_cur(). 6858 */ 6859 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf) 6860 { 6861 struct scx_sched *sch; 6862 6863 guard(rcu)(); 6864 6865 sch = rcu_dereference(scx_root); 6866 if (unlikely(!sch)) 6867 return; 6868 6869 if (unlikely(perf > SCX_CPUPERF_ONE)) { 6870 scx_error(sch, "Invalid cpuperf target %u for CPU %d", perf, cpu); 6871 return; 6872 } 6873 6874 if (ops_cpu_valid(sch, cpu, NULL)) { 6875 struct rq *rq = cpu_rq(cpu), *locked_rq = scx_locked_rq(); 6876 struct rq_flags rf; 6877 6878 /* 6879 * When called with an rq lock held, restrict the operation 6880 * to the corresponding CPU to prevent ABBA deadlocks. 6881 */ 6882 if (locked_rq && rq != locked_rq) { 6883 scx_error(sch, "Invalid target CPU %d", cpu); 6884 return; 6885 } 6886 6887 /* 6888 * If no rq lock is held, allow to operate on any CPU by 6889 * acquiring the corresponding rq lock. 6890 */ 6891 if (!locked_rq) { 6892 rq_lock_irqsave(rq, &rf); 6893 update_rq_clock(rq); 6894 } 6895 6896 rq->scx.cpuperf_target = perf; 6897 cpufreq_update_util(rq, 0); 6898 6899 if (!locked_rq) 6900 rq_unlock_irqrestore(rq, &rf); 6901 } 6902 } 6903 6904 /** 6905 * scx_bpf_nr_node_ids - Return the number of possible node IDs 6906 * 6907 * All valid node IDs in the system are smaller than the returned value. 6908 */ 6909 __bpf_kfunc u32 scx_bpf_nr_node_ids(void) 6910 { 6911 return nr_node_ids; 6912 } 6913 6914 /** 6915 * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs 6916 * 6917 * All valid CPU IDs in the system are smaller than the returned value. 
 */
__bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
{
	return nr_cpu_ids;
}

/**
 * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
 */
__bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
{
	return cpu_possible_mask;
}

/**
 * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
 */
__bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
{
	return cpu_online_mask;
}

/**
 * scx_bpf_put_cpumask - Release a possible/online cpumask
 * @cpumask: cpumask to release
 */
__bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
{
	/*
	 * Empty function body because we aren't actually acquiring or releasing
	 * a reference to a global cpumask, which is read-only in the caller and
	 * is never released. The acquire / release semantics here are just used
	 * to make the cpumask a trusted pointer in the caller.
	 */
}

/**
 * scx_bpf_task_running - Is task currently running?
 * @p: task of interest
 */
__bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
{
	return task_rq(p)->curr == p;
}

/**
 * scx_bpf_task_cpu - CPU a task is currently associated with
 * @p: task of interest
 */
__bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
{
	return task_cpu(p);
}

/**
 * scx_bpf_cpu_rq - Fetch the rq of a CPU
 * @cpu: CPU of the rq
 */
__bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return NULL;

	if (!ops_cpu_valid(sch, cpu, NULL))
		return NULL;

	if (!sch->warned_deprecated_rq) {
		printk_deferred(KERN_WARNING "sched_ext: %s() is deprecated; "
				"use scx_bpf_locked_rq() when holding rq lock "
				"or scx_bpf_cpu_curr() to read remote curr safely.\n", __func__);
		sch->warned_deprecated_rq = true;
	}

	return cpu_rq(cpu);
}

/**
 * scx_bpf_locked_rq - Return the rq currently locked by SCX
 *
 * Returns the rq if a rq lock is currently held by SCX. Otherwise, emits an
 * error and returns NULL.
 */
__bpf_kfunc struct rq *scx_bpf_locked_rq(void)
{
	struct scx_sched *sch;
	struct rq *rq;

	guard(preempt)();

	sch = rcu_dereference_sched(scx_root);
	if (unlikely(!sch))
		return NULL;

	rq = scx_locked_rq();
	if (!rq) {
		scx_error(sch, "accessing rq without holding rq lock");
		return NULL;
	}

	return rq;
}

/**
 * scx_bpf_cpu_curr - Return remote CPU's curr task
 * @cpu: CPU of interest
 *
 * Callers must hold RCU read lock (KF_RCU).
 */
__bpf_kfunc struct task_struct *scx_bpf_cpu_curr(s32 cpu)
{
	struct scx_sched *sch;

	guard(rcu)();

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))
		return NULL;

	if (!ops_cpu_valid(sch, cpu, NULL))
		return NULL;

	return rcu_dereference(cpu_rq(cpu)->curr);
}

/**
 * scx_bpf_task_cgroup - Return the sched cgroup of a task
 * @p: task of interest
 *
 * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
 * from the scheduler's POV.
SCX operations should use this function to 7053 * determine @p's current cgroup as, unlike following @p->cgroups, 7054 * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all 7055 * rq-locked operations. Can be called on the parameter tasks of rq-locked 7056 * operations. The restriction guarantees that @p's rq is locked by the caller. 7057 */ 7058 #ifdef CONFIG_CGROUP_SCHED 7059 __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) 7060 { 7061 struct task_group *tg = p->sched_task_group; 7062 struct cgroup *cgrp = &cgrp_dfl_root.cgrp; 7063 struct scx_sched *sch; 7064 7065 guard(rcu)(); 7066 7067 sch = rcu_dereference(scx_root); 7068 if (unlikely(!sch)) 7069 goto out; 7070 7071 if (!scx_kf_allowed_on_arg_tasks(sch, __SCX_KF_RQ_LOCKED, p)) 7072 goto out; 7073 7074 cgrp = tg_cgrp(tg); 7075 7076 out: 7077 cgroup_get(cgrp); 7078 return cgrp; 7079 } 7080 #endif 7081 7082 /** 7083 * scx_bpf_now - Returns a high-performance monotonically non-decreasing 7084 * clock for the current CPU. The clock returned is in nanoseconds. 7085 * 7086 * It provides the following properties: 7087 * 7088 * 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently 7089 * to account for execution time and track tasks' runtime properties. 7090 * Unfortunately, in some hardware platforms, bpf_ktime_get_ns() -- which 7091 * eventually reads a hardware timestamp counter -- is neither performant nor 7092 * scalable. scx_bpf_now() aims to provide a high-performance clock by 7093 * using the rq clock in the scheduler core whenever possible. 7094 * 7095 * 2) High enough resolution for the BPF scheduler use cases: In most BPF 7096 * scheduler use cases, the required clock resolution is lower than the most 7097 * accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically 7098 * uses the rq clock in the scheduler core whenever it is valid. It considers 7099 * that the rq clock is valid from the time the rq clock is updated 7100 * (update_rq_clock) until the rq is unlocked (rq_unpin_lock). 7101 * 7102 * 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now() 7103 * guarantees the clock never goes backward when comparing them in the same 7104 * CPU. On the other hand, when comparing clocks in different CPUs, there 7105 * is no such guarantee -- the clock can go backward. It provides a 7106 * monotonically *non-decreasing* clock so that it would provide the same 7107 * clock values in two different scx_bpf_now() calls in the same CPU 7108 * during the same period of when the rq clock is valid. 7109 */ 7110 __bpf_kfunc u64 scx_bpf_now(void) 7111 { 7112 struct rq *rq; 7113 u64 clock; 7114 7115 preempt_disable(); 7116 7117 rq = this_rq(); 7118 if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) { 7119 /* 7120 * If the rq clock is valid, use the cached rq clock. 7121 * 7122 * Note that scx_bpf_now() is re-entrant between a process 7123 * context and an interrupt context (e.g., timer interrupt). 7124 * However, we don't need to consider the race between them 7125 * because such race is not observable from a caller. 7126 */ 7127 clock = READ_ONCE(rq->scx.clock); 7128 } else { 7129 /* 7130 * Otherwise, return a fresh rq clock. 7131 * 7132 * The rq clock is updated outside of the rq lock. 7133 * In this case, keep the updated rq clock invalid so the next 7134 * kfunc call outside the rq lock gets a fresh rq clock. 
		 */
		clock = sched_clock_cpu(cpu_of(rq));
	}

	preempt_enable();

	return clock;
}

static void scx_read_events(struct scx_sched *sch, struct scx_event_stats *events)
{
	struct scx_event_stats *e_cpu;
	int cpu;

	/* Aggregate per-CPU event counters into @events. */
	memset(events, 0, sizeof(*events));
	for_each_possible_cpu(cpu) {
		e_cpu = &per_cpu_ptr(sch->pcpu, cpu)->event_stats;
		scx_agg_event(events, e_cpu, SCX_EV_SELECT_CPU_FALLBACK);
		scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
		scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_KEEP_LAST);
		scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_EXITING);
		scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
		scx_agg_event(events, e_cpu, SCX_EV_REFILL_SLICE_DFL);
		scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DURATION);
		scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DISPATCH);
		scx_agg_event(events, e_cpu, SCX_EV_BYPASS_ACTIVATE);
	}
}

/**
 * scx_bpf_events - Copy the system-wide event counters to @events
 * @events: output buffer from a BPF program
 * @events__sz: @events len, must end in '__sz' for the verifier
 */
__bpf_kfunc void scx_bpf_events(struct scx_event_stats *events,
				size_t events__sz)
{
	struct scx_sched *sch;
	struct scx_event_stats e_sys;

	rcu_read_lock();
	sch = rcu_dereference(scx_root);
	if (sch)
		scx_read_events(sch, &e_sys);
	else
		memset(&e_sys, 0, sizeof(e_sys));
	rcu_read_unlock();

	/*
	 * We cannot entirely trust a BPF-provided size since a BPF program
	 * might be compiled against a different vmlinux.h, of which
	 * scx_event_stats would be larger (a newer vmlinux.h) or smaller
	 * (an older vmlinux.h). Hence, we use the smaller size to avoid
	 * memory corruption.
7190 */ 7191 events__sz = min(events__sz, sizeof(*events)); 7192 memcpy(events, &e_sys, events__sz); 7193 } 7194 7195 __bpf_kfunc_end_defs(); 7196 7197 BTF_KFUNCS_START(scx_kfunc_ids_any) 7198 BTF_ID_FLAGS(func, scx_bpf_task_set_slice, KF_RCU); 7199 BTF_ID_FLAGS(func, scx_bpf_task_set_dsq_vtime, KF_RCU); 7200 BTF_ID_FLAGS(func, scx_bpf_kick_cpu) 7201 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued) 7202 BTF_ID_FLAGS(func, scx_bpf_destroy_dsq) 7203 BTF_ID_FLAGS(func, scx_bpf_dsq_peek, KF_RCU_PROTECTED | KF_RET_NULL) 7204 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED) 7205 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL) 7206 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY) 7207 BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS) 7208 BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS) 7209 BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS) 7210 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local___v2) 7211 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap) 7212 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur) 7213 BTF_ID_FLAGS(func, scx_bpf_cpuperf_set) 7214 BTF_ID_FLAGS(func, scx_bpf_nr_node_ids) 7215 BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids) 7216 BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE) 7217 BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE) 7218 BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE) 7219 BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU) 7220 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU) 7221 BTF_ID_FLAGS(func, scx_bpf_cpu_rq) 7222 BTF_ID_FLAGS(func, scx_bpf_locked_rq, KF_RET_NULL) 7223 BTF_ID_FLAGS(func, scx_bpf_cpu_curr, KF_RET_NULL | KF_RCU_PROTECTED) 7224 #ifdef CONFIG_CGROUP_SCHED 7225 BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE) 7226 #endif 7227 BTF_ID_FLAGS(func, scx_bpf_now) 7228 BTF_ID_FLAGS(func, scx_bpf_events, KF_TRUSTED_ARGS) 7229 BTF_KFUNCS_END(scx_kfunc_ids_any) 7230 7231 static const struct btf_kfunc_id_set scx_kfunc_set_any = { 7232 .owner = THIS_MODULE, 7233 .set = &scx_kfunc_ids_any, 7234 }; 7235 7236 static int __init scx_init(void) 7237 { 7238 int ret; 7239 7240 /* 7241 * kfunc registration can't be done from init_sched_ext_class() as 7242 * register_btf_kfunc_id_set() needs most of the system to be up. 7243 * 7244 * Some kfuncs are context-sensitive and can only be called from 7245 * specific SCX ops. They are grouped into BTF sets accordingly. 7246 * Unfortunately, BPF currently doesn't have a way of enforcing such 7247 * restrictions. Eventually, the verifier should be able to enforce 7248 * them. For now, register them the same and make each kfunc explicitly 7249 * check using scx_kf_allowed(). 
7250 */ 7251 if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 7252 &scx_kfunc_set_enqueue_dispatch)) || 7253 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 7254 &scx_kfunc_set_dispatch)) || 7255 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 7256 &scx_kfunc_set_cpu_release)) || 7257 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 7258 &scx_kfunc_set_unlocked)) || 7259 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, 7260 &scx_kfunc_set_unlocked)) || 7261 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 7262 &scx_kfunc_set_any)) || 7263 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, 7264 &scx_kfunc_set_any)) || 7265 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, 7266 &scx_kfunc_set_any))) { 7267 pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret); 7268 return ret; 7269 } 7270 7271 ret = scx_idle_init(); 7272 if (ret) { 7273 pr_err("sched_ext: Failed to initialize idle tracking (%d)\n", ret); 7274 return ret; 7275 } 7276 7277 ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops); 7278 if (ret) { 7279 pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret); 7280 return ret; 7281 } 7282 7283 ret = register_pm_notifier(&scx_pm_notifier); 7284 if (ret) { 7285 pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret); 7286 return ret; 7287 } 7288 7289 scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj); 7290 if (!scx_kset) { 7291 pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n"); 7292 return -ENOMEM; 7293 } 7294 7295 ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group); 7296 if (ret < 0) { 7297 pr_err("sched_ext: Failed to add global attributes\n"); 7298 return ret; 7299 } 7300 7301 if (!alloc_cpumask_var(&scx_bypass_lb_donee_cpumask, GFP_KERNEL) || 7302 !alloc_cpumask_var(&scx_bypass_lb_resched_cpumask, GFP_KERNEL)) { 7303 pr_err("sched_ext: Failed to allocate cpumasks\n"); 7304 return -ENOMEM; 7305 } 7306 7307 return 0; 7308 } 7309 __initcall(scx_init); 7310