1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst 4 * 5 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 6 * Copyright (c) 2022 Tejun Heo <tj@kernel.org> 7 * Copyright (c) 2022 David Vernet <dvernet@meta.com> 8 */ 9 #include <linux/btf_ids.h> 10 #include "ext_idle.h" 11 12 /* 13 * NOTE: sched_ext is in the process of growing multiple scheduler support and 14 * scx_root usage is in a transitional state. Naked dereferences are safe if the 15 * caller is one of the tasks attached to SCX and explicit RCU dereference is 16 * necessary otherwise. Naked scx_root dereferences trigger sparse warnings but 17 * are used as temporary markers to indicate that the dereferences need to be 18 * updated to point to the associated scheduler instances rather than scx_root. 19 */ 20 static struct scx_sched __rcu *scx_root; 21 22 /* 23 * During exit, a task may schedule after losing its PIDs. When disabling the 24 * BPF scheduler, we need to be able to iterate tasks in every state to 25 * guarantee system safety. Maintain a dedicated task list which contains every 26 * task between its fork and eventual free. 27 */ 28 static DEFINE_RAW_SPINLOCK(scx_tasks_lock); 29 static LIST_HEAD(scx_tasks); 30 31 /* ops enable/disable */ 32 static DEFINE_MUTEX(scx_enable_mutex); 33 DEFINE_STATIC_KEY_FALSE(__scx_enabled); 34 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem); 35 static atomic_t scx_enable_state_var = ATOMIC_INIT(SCX_DISABLED); 36 static int scx_bypass_depth; 37 static cpumask_var_t scx_bypass_lb_donee_cpumask; 38 static cpumask_var_t scx_bypass_lb_resched_cpumask; 39 static bool scx_aborting; 40 static bool scx_init_task_enabled; 41 static bool scx_switching_all; 42 DEFINE_STATIC_KEY_FALSE(__scx_switched_all); 43 44 static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0); 45 static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0); 46 47 /* 48 * A monotonically increasing sequence number that is incremented every time a 49 * scheduler is enabled. This can be used to check if any custom sched_ext 50 * scheduler has ever been used in the system. 51 */ 52 static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0); 53 54 /* 55 * The maximum amount of time in jiffies that a task may be runnable without 56 * being scheduled on a CPU. If this timeout is exceeded, it will trigger 57 * scx_error(). 58 */ 59 static unsigned long scx_watchdog_timeout; 60 61 /* 62 * The last time the delayed work was run. This delayed work relies on 63 * ksoftirqd being able to run to service timer interrupts, so it's possible 64 * that this work itself could get wedged. To account for this, we check that 65 * it's not stalled in the timer tick, and trigger an error if it is. 66 */ 67 static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES; 68 69 static struct delayed_work scx_watchdog_work; 70 71 /* 72 * For %SCX_KICK_WAIT: Each CPU has a pointer to an array of kick_sync sequence 73 * numbers. The arrays are allocated with kvzalloc() as size can exceed percpu 74 * allocator limits on large machines. O(nr_cpu_ids^2) allocation, allocated 75 * lazily when enabling and freed when disabling to avoid waste when sched_ext 76 * isn't active. 77 */ 78 struct scx_kick_syncs { 79 struct rcu_head rcu; 80 unsigned long syncs[]; 81 }; 82 83 static DEFINE_PER_CPU(struct scx_kick_syncs __rcu *, scx_kick_syncs); 84 85 /* 86 * Direct dispatch marker. 87 * 88 * Non-NULL values are used for direct dispatch from enqueue path.
A valid 89 * pointer points to the task currently being enqueued. An ERR_PTR value is used 90 * to indicate that direct dispatch has already happened. 91 */ 92 static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task); 93 94 static const struct rhashtable_params dsq_hash_params = { 95 .key_len = sizeof_field(struct scx_dispatch_q, id), 96 .key_offset = offsetof(struct scx_dispatch_q, id), 97 .head_offset = offsetof(struct scx_dispatch_q, hash_node), 98 }; 99 100 static LLIST_HEAD(dsqs_to_free); 101 102 /* dispatch buf */ 103 struct scx_dsp_buf_ent { 104 struct task_struct *task; 105 unsigned long qseq; 106 u64 dsq_id; 107 u64 enq_flags; 108 }; 109 110 static u32 scx_dsp_max_batch; 111 112 struct scx_dsp_ctx { 113 struct rq *rq; 114 u32 cursor; 115 u32 nr_tasks; 116 struct scx_dsp_buf_ent buf[]; 117 }; 118 119 static struct scx_dsp_ctx __percpu *scx_dsp_ctx; 120 121 /* string formatting from BPF */ 122 struct scx_bstr_buf { 123 u64 data[MAX_BPRINTF_VARARGS]; 124 char line[SCX_EXIT_MSG_LEN]; 125 }; 126 127 static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock); 128 static struct scx_bstr_buf scx_exit_bstr_buf; 129 130 /* ops debug dump */ 131 struct scx_dump_data { 132 s32 cpu; 133 bool first; 134 s32 cursor; 135 struct seq_buf *s; 136 const char *prefix; 137 struct scx_bstr_buf buf; 138 }; 139 140 static struct scx_dump_data scx_dump_data = { 141 .cpu = -1, 142 }; 143 144 /* /sys/kernel/sched_ext interface */ 145 static struct kset *scx_kset; 146 147 /* 148 * Parameters that can be adjusted through /sys/module/sched_ext/parameters. 149 * There usually is no reason to modify these as normal scheduler operation 150 * shouldn't be affected by them. The knobs are primarily for debugging. 151 */ 152 static u64 scx_slice_dfl = SCX_SLICE_DFL; 153 static unsigned int scx_slice_bypass_us = SCX_SLICE_BYPASS / NSEC_PER_USEC; 154 static unsigned int scx_bypass_lb_intv_us = SCX_BYPASS_LB_DFL_INTV_US; 155 156 static int set_slice_us(const char *val, const struct kernel_param *kp) 157 { 158 return param_set_uint_minmax(val, kp, 100, 100 * USEC_PER_MSEC); 159 } 160 161 static const struct kernel_param_ops slice_us_param_ops = { 162 .set = set_slice_us, 163 .get = param_get_uint, 164 }; 165 166 static int set_bypass_lb_intv_us(const char *val, const struct kernel_param *kp) 167 { 168 return param_set_uint_minmax(val, kp, 0, 10 * USEC_PER_SEC); 169 } 170 171 static const struct kernel_param_ops bypass_lb_intv_us_param_ops = { 172 .set = set_bypass_lb_intv_us, 173 .get = param_get_uint, 174 }; 175 176 #undef MODULE_PARAM_PREFIX 177 #define MODULE_PARAM_PREFIX "sched_ext." 
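
/*
 * For example, with the definitions above, writing 5000 to
 * /sys/module/sched_ext/parameters/slice_bypass_us selects a 5ms bypass
 * slice, which is applied on the next scheduler [un]load.
 */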
178 179 module_param_cb(slice_bypass_us, &slice_us_param_ops, &scx_slice_bypass_us, 0600); 180 MODULE_PARM_DESC(slice_bypass_us, "bypass slice in microseconds, applied on [un]load (100us to 100ms)"); 181 module_param_cb(bypass_lb_intv_us, &bypass_lb_intv_us_param_ops, &scx_bypass_lb_intv_us, 0600); 182 MODULE_PARM_DESC(bypass_lb_intv_us, "bypass load balance interval in microseconds (0 (disable) to 10s)"); 183 184 #undef MODULE_PARAM_PREFIX 185 186 #define CREATE_TRACE_POINTS 187 #include <trace/events/sched_ext.h> 188 189 static void process_ddsp_deferred_locals(struct rq *rq); 190 static u32 reenq_local(struct rq *rq); 191 static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags); 192 static bool scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind, 193 s64 exit_code, const char *fmt, va_list args); 194 195 static __printf(4, 5) bool scx_exit(struct scx_sched *sch, 196 enum scx_exit_kind kind, s64 exit_code, 197 const char *fmt, ...) 198 { 199 va_list args; 200 bool ret; 201 202 va_start(args, fmt); 203 ret = scx_vexit(sch, kind, exit_code, fmt, args); 204 va_end(args); 205 206 return ret; 207 } 208 209 #define scx_error(sch, fmt, args...) scx_exit((sch), SCX_EXIT_ERROR, 0, fmt, ##args) 210 #define scx_verror(sch, fmt, args) scx_vexit((sch), SCX_EXIT_ERROR, 0, fmt, args) 211 212 #define SCX_HAS_OP(sch, op) test_bit(SCX_OP_IDX(op), (sch)->has_op) 213 214 static long jiffies_delta_msecs(unsigned long at, unsigned long now) 215 { 216 if (time_after(at, now)) 217 return jiffies_to_msecs(at - now); 218 else 219 return -(long)jiffies_to_msecs(now - at); 220 } 221 222 /* if the highest set bit is N, return a mask with bits [N+1, 31] set */ 223 static u32 higher_bits(u32 flags) 224 { 225 return ~((1 << fls(flags)) - 1); 226 } 227 228 /* return the mask with only the highest bit set */ 229 static u32 highest_bit(u32 flags) 230 { 231 int bit = fls(flags); 232 return ((u64)1 << bit) >> 1; 233 } 234 235 static bool u32_before(u32 a, u32 b) 236 { 237 return (s32)(a - b) < 0; 238 } 239 240 static struct scx_dispatch_q *find_global_dsq(struct scx_sched *sch, 241 struct task_struct *p) 242 { 243 return sch->global_dsqs[cpu_to_node(task_cpu(p))]; 244 } 245 246 static struct scx_dispatch_q *find_user_dsq(struct scx_sched *sch, u64 dsq_id) 247 { 248 return rhashtable_lookup(&sch->dsq_hash, &dsq_id, dsq_hash_params); 249 } 250 251 static const struct sched_class *scx_setscheduler_class(struct task_struct *p) 252 { 253 if (p->sched_class == &stop_sched_class) 254 return &stop_sched_class; 255 256 return __setscheduler_class(p->policy, p->prio); 257 } 258 259 /* 260 * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX 261 * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate 262 * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check 263 * whether it's running from an allowed context. 264 * 265 * @mask is constant, always inline to cull the mask calculations. 266 */ 267 static __always_inline void scx_kf_allow(u32 mask) 268 { 269 /* nesting is allowed only in increasing scx_kf_mask order */ 270 WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask, 271 "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n", 272 current->scx.kf_mask, mask); 273 current->scx.kf_mask |= mask; 274 barrier(); 275 } 276 277 static void scx_kf_disallow(u32 mask) 278 { 279 barrier(); 280 current->scx.kf_mask &= ~mask; 281 } 282 283 /* 284 * Track the rq currently locked. 
285 * 286 * This allows kfuncs to safely operate on rq from any scx ops callback, 287 * knowing which rq is already locked. 288 */ 289 DEFINE_PER_CPU(struct rq *, scx_locked_rq_state); 290 291 static inline void update_locked_rq(struct rq *rq) 292 { 293 /* 294 * Check whether @rq is actually locked. This can help expose bugs 295 * or incorrect assumptions about the context in which a kfunc or 296 * callback is executed. 297 */ 298 if (rq) 299 lockdep_assert_rq_held(rq); 300 __this_cpu_write(scx_locked_rq_state, rq); 301 } 302 303 #define SCX_CALL_OP(sch, mask, op, rq, args...) \ 304 do { \ 305 if (rq) \ 306 update_locked_rq(rq); \ 307 if (mask) { \ 308 scx_kf_allow(mask); \ 309 (sch)->ops.op(args); \ 310 scx_kf_disallow(mask); \ 311 } else { \ 312 (sch)->ops.op(args); \ 313 } \ 314 if (rq) \ 315 update_locked_rq(NULL); \ 316 } while (0) 317 318 #define SCX_CALL_OP_RET(sch, mask, op, rq, args...) \ 319 ({ \ 320 __typeof__((sch)->ops.op(args)) __ret; \ 321 \ 322 if (rq) \ 323 update_locked_rq(rq); \ 324 if (mask) { \ 325 scx_kf_allow(mask); \ 326 __ret = (sch)->ops.op(args); \ 327 scx_kf_disallow(mask); \ 328 } else { \ 329 __ret = (sch)->ops.op(args); \ 330 } \ 331 if (rq) \ 332 update_locked_rq(NULL); \ 333 __ret; \ 334 }) 335 336 /* 337 * Some kfuncs are allowed only on the tasks that are subjects of the 338 * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such 339 * restrictions, the following SCX_CALL_OP_*() variants should be used when 340 * invoking scx_ops operations that take task arguments. These can only be used 341 * for non-nesting operations due to the way the tasks are tracked. 342 * 343 * kfuncs which can only operate on such tasks can in turn use 344 * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on 345 * the specific task. 346 */ 347 #define SCX_CALL_OP_TASK(sch, mask, op, rq, task, args...) \ 348 do { \ 349 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \ 350 current->scx.kf_tasks[0] = task; \ 351 SCX_CALL_OP((sch), mask, op, rq, task, ##args); \ 352 current->scx.kf_tasks[0] = NULL; \ 353 } while (0) 354 355 #define SCX_CALL_OP_TASK_RET(sch, mask, op, rq, task, args...) \ 356 ({ \ 357 __typeof__((sch)->ops.op(task, ##args)) __ret; \ 358 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \ 359 current->scx.kf_tasks[0] = task; \ 360 __ret = SCX_CALL_OP_RET((sch), mask, op, rq, task, ##args); \ 361 current->scx.kf_tasks[0] = NULL; \ 362 __ret; \ 363 }) 364 365 #define SCX_CALL_OP_2TASKS_RET(sch, mask, op, rq, task0, task1, args...) \ 366 ({ \ 367 __typeof__((sch)->ops.op(task0, task1, ##args)) __ret; \ 368 BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL); \ 369 current->scx.kf_tasks[0] = task0; \ 370 current->scx.kf_tasks[1] = task1; \ 371 __ret = SCX_CALL_OP_RET((sch), mask, op, rq, task0, task1, ##args); \ 372 current->scx.kf_tasks[0] = NULL; \ 373 current->scx.kf_tasks[1] = NULL; \ 374 __ret; \ 375 }) 376 377 /* @mask is constant, always inline to cull unnecessary branches */ 378 static __always_inline bool scx_kf_allowed(struct scx_sched *sch, u32 mask) 379 { 380 if (unlikely(!(current->scx.kf_mask & mask))) { 381 scx_error(sch, "kfunc with mask 0x%x called from an operation only allowing 0x%x", 382 mask, current->scx.kf_mask); 383 return false; 384 } 385 386 /* 387 * Enforce nesting boundaries. e.g. A kfunc which can be called from 388 * DISPATCH must not be called if we're running DEQUEUE which is nested 389 * inside ops.dispatch(). 
We don't need to check boundaries for any 390 * blocking kfuncs as the verifier ensures they're only called from 391 * sleepable progs. 392 */ 393 if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE && 394 (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) { 395 scx_error(sch, "cpu_release kfunc called from a nested operation"); 396 return false; 397 } 398 399 if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH && 400 (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) { 401 scx_error(sch, "dispatch kfunc called from a nested operation"); 402 return false; 403 } 404 405 return true; 406 } 407 408 /* see SCX_CALL_OP_TASK() */ 409 static __always_inline bool scx_kf_allowed_on_arg_tasks(struct scx_sched *sch, 410 u32 mask, 411 struct task_struct *p) 412 { 413 if (!scx_kf_allowed(sch, mask)) 414 return false; 415 416 if (unlikely((p != current->scx.kf_tasks[0] && 417 p != current->scx.kf_tasks[1]))) { 418 scx_error(sch, "called on a task not being operated on"); 419 return false; 420 } 421 422 return true; 423 } 424 425 /** 426 * nldsq_next_task - Iterate to the next task in a non-local DSQ 427 * @dsq: user dsq being iterated 428 * @cur: current position, %NULL to start iteration 429 * @rev: walk backwards 430 * 431 * Returns %NULL when iteration is finished. 432 */ 433 static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq, 434 struct task_struct *cur, bool rev) 435 { 436 struct list_head *list_node; 437 struct scx_dsq_list_node *dsq_lnode; 438 439 lockdep_assert_held(&dsq->lock); 440 441 if (cur) 442 list_node = &cur->scx.dsq_list.node; 443 else 444 list_node = &dsq->list; 445 446 /* find the next task, need to skip BPF iteration cursors */ 447 do { 448 if (rev) 449 list_node = list_node->prev; 450 else 451 list_node = list_node->next; 452 453 if (list_node == &dsq->list) 454 return NULL; 455 456 dsq_lnode = container_of(list_node, struct scx_dsq_list_node, 457 node); 458 } while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR); 459 460 return container_of(dsq_lnode, struct task_struct, scx.dsq_list); 461 } 462 463 #define nldsq_for_each_task(p, dsq) \ 464 for ((p) = nldsq_next_task((dsq), NULL, false); (p); \ 465 (p) = nldsq_next_task((dsq), (p), false)) 466 467 468 /* 469 * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse] 470 * dispatch order. BPF-visible iterator is opaque and larger to allow future 471 * changes without breaking backward compatibility. Can be used with 472 * bpf_for_each(). See bpf_iter_scx_dsq_*(). 473 */ 474 enum scx_dsq_iter_flags { 475 /* iterate in the reverse dispatch order */ 476 SCX_DSQ_ITER_REV = 1U << 16, 477 478 __SCX_DSQ_ITER_HAS_SLICE = 1U << 30, 479 __SCX_DSQ_ITER_HAS_VTIME = 1U << 31, 480 481 __SCX_DSQ_ITER_USER_FLAGS = SCX_DSQ_ITER_REV, 482 __SCX_DSQ_ITER_ALL_FLAGS = __SCX_DSQ_ITER_USER_FLAGS | 483 __SCX_DSQ_ITER_HAS_SLICE | 484 __SCX_DSQ_ITER_HAS_VTIME, 485 }; 486 487 struct bpf_iter_scx_dsq_kern { 488 struct scx_dsq_list_node cursor; 489 struct scx_dispatch_q *dsq; 490 u64 slice; 491 u64 vtime; 492 } __attribute__((aligned(8))); 493 494 struct bpf_iter_scx_dsq { 495 u64 __opaque[6]; 496 } __attribute__((aligned(8))); 497 498 499 /* 500 * SCX task iterator. 
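 *
 * Illustrative usage sketch (the loop body is elided):
 *
 *	struct scx_task_iter sti;
 *	struct task_struct *p;
 *
 *	scx_task_iter_start(&sti);
 *	while ((p = scx_task_iter_next_locked(&sti))) {
 *		... operate on @p with its rq locked ...
 *	}
 *	scx_task_iter_stop(&sti);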
501 */ 502 struct scx_task_iter { 503 struct sched_ext_entity cursor; 504 struct task_struct *locked_task; 505 struct rq *rq; 506 struct rq_flags rf; 507 u32 cnt; 508 bool list_locked; 509 }; 510 511 /** 512 * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration 513 * @iter: iterator to init 514 * 515 * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter 516 * must eventually be stopped with scx_task_iter_stop(). 517 * 518 * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock() 519 * between this and the first next() call or between any two next() calls. If 520 * the locks are released between two next() calls, the caller is responsible 521 * for ensuring that the task being iterated remains accessible either through 522 * RCU read lock or obtaining a reference count. 523 * 524 * All tasks which existed when the iteration started are guaranteed to be 525 * visited as long as they are not dead. 526 */ 527 static void scx_task_iter_start(struct scx_task_iter *iter) 528 { 529 memset(iter, 0, sizeof(*iter)); 530 531 raw_spin_lock_irq(&scx_tasks_lock); 532 533 iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR }; 534 list_add(&iter->cursor.tasks_node, &scx_tasks); 535 iter->list_locked = true; 536 } 537 538 static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter) 539 { 540 if (iter->locked_task) { 541 task_rq_unlock(iter->rq, iter->locked_task, &iter->rf); 542 iter->locked_task = NULL; 543 } 544 } 545 546 /** 547 * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator 548 * @iter: iterator to unlock 549 * 550 * If @iter is in the middle of a locked iteration, it may be locking the rq of 551 * the task currently being visited in addition to scx_tasks_lock. Unlock both. 552 * This function can be safely called anytime during an iteration. The next 553 * iterator operation will automatically restore the necessary locking. 554 */ 555 static void scx_task_iter_unlock(struct scx_task_iter *iter) 556 { 557 __scx_task_iter_rq_unlock(iter); 558 if (iter->list_locked) { 559 iter->list_locked = false; 560 raw_spin_unlock_irq(&scx_tasks_lock); 561 } 562 } 563 564 static void __scx_task_iter_maybe_relock(struct scx_task_iter *iter) 565 { 566 if (!iter->list_locked) { 567 raw_spin_lock_irq(&scx_tasks_lock); 568 iter->list_locked = true; 569 } 570 } 571 572 /** 573 * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock 574 * @iter: iterator to exit 575 * 576 * Exit a previously initialized @iter. Must be called with scx_tasks_lock held 577 * which is released on return. If the iterator holds a task's rq lock, that rq 578 * lock is also released. See scx_task_iter_start() for details. 579 */ 580 static void scx_task_iter_stop(struct scx_task_iter *iter) 581 { 582 __scx_task_iter_maybe_relock(iter); 583 list_del_init(&iter->cursor.tasks_node); 584 scx_task_iter_unlock(iter); 585 } 586 587 /** 588 * scx_task_iter_next - Next task 589 * @iter: iterator to walk 590 * 591 * Visit the next task. See scx_task_iter_start() for details. Locks are dropped 592 * and re-acquired every %SCX_TASK_ITER_BATCH iterations to avoid causing stalls 593 * by holding scx_tasks_lock for too long. 
594 */ 595 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter) 596 { 597 struct list_head *cursor = &iter->cursor.tasks_node; 598 struct sched_ext_entity *pos; 599 600 if (!(++iter->cnt % SCX_TASK_ITER_BATCH)) { 601 scx_task_iter_unlock(iter); 602 cond_resched(); 603 } 604 605 __scx_task_iter_maybe_relock(iter); 606 607 list_for_each_entry(pos, cursor, tasks_node) { 608 if (&pos->tasks_node == &scx_tasks) 609 return NULL; 610 if (!(pos->flags & SCX_TASK_CURSOR)) { 611 list_move(cursor, &pos->tasks_node); 612 return container_of(pos, struct task_struct, scx); 613 } 614 } 615 616 /* can't happen, should always terminate at scx_tasks above */ 617 BUG(); 618 } 619 620 /** 621 * scx_task_iter_next_locked - Next non-idle task with its rq locked 622 * @iter: iterator to walk 623 * 624 * Visit the next non-idle task with its rq lock held. Tasks on 625 * idle_sched_class (the init_tasks) are skipped. See scx_task_iter_start() 626 * for details. 627 */ 628 static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter) 629 { 630 struct task_struct *p; 631 632 __scx_task_iter_rq_unlock(iter); 633 634 while ((p = scx_task_iter_next(iter))) { 635 /* 636 * scx_task_iter is used to prepare and move tasks into SCX 637 * while loading the BPF scheduler and vice-versa while 638 * unloading. The init_tasks ("swappers") should be excluded 639 * from the iteration because: 640 * 641 * - It's unsafe to use __setscheduler_class() on an init_task to 642 * determine the sched_class to use as it won't preserve its 643 * idle_sched_class. 644 * 645 * - ops.init/exit_task() can easily be confused if called with 646 * init_tasks as they, e.g., share PID 0. 647 * 648 * As init_tasks are never scheduled through SCX, they can be 649 * skipped safely. Note that is_idle_task() which tests %PF_IDLE 650 * doesn't work here: 651 * 652 * - %PF_IDLE may not be set for an init_task whose CPU hasn't 653 * yet been onlined. 654 * 655 * - %PF_IDLE can be set on tasks that are not init_tasks. See 656 * play_idle_precise() used by CONFIG_IDLE_INJECT. 657 * 658 * Test for idle_sched_class as only init_tasks are on it. 659 */ 660 if (p->sched_class != &idle_sched_class) 661 break; 662 } 663 if (!p) 664 return NULL; 665 666 iter->rq = task_rq_lock(p, &iter->rf); 667 iter->locked_task = p; 668 669 return p; 670 } 671 672 /** 673 * scx_add_event - Increase an event counter for 'name' by 'cnt' 674 * @sch: scx_sched to account events for 675 * @name: an event name defined in struct scx_event_stats 676 * @cnt: the number of times the event occurred 677 * 678 * This can be used when preemption is not disabled. 679 */ 680 #define scx_add_event(sch, name, cnt) do { \ 681 this_cpu_add((sch)->pcpu->event_stats.name, (cnt)); \ 682 trace_sched_ext_event(#name, (cnt)); \ 683 } while (0) 684 685 /** 686 * __scx_add_event - Increase an event counter for 'name' by 'cnt' 687 * @sch: scx_sched to account events for 688 * @name: an event name defined in struct scx_event_stats 689 * @cnt: the number of times the event occurred 690 * 691 * This should be used only when preemption is disabled.
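 *
 * For example (an illustrative sketch based on a call later in this file), a
 * path that already runs with an rq locked, and thus with preemption
 * disabled, may do:
 *
 *	__scx_add_event(sch, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE, 1);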
692 */ 693 #define __scx_add_event(sch, name, cnt) do { \ 694 __this_cpu_add((sch)->pcpu->event_stats.name, (cnt)); \ 695 trace_sched_ext_event(#name, (cnt)); \ 696 } while (0) 697 698 /** 699 * scx_agg_event - Aggregate an event counter 'kind' from 'src_e' to 'dst_e' 700 * @dst_e: destination event stats 701 * @src_e: source event stats 702 * @kind: the kind of event to aggregate 703 */ 704 #define scx_agg_event(dst_e, src_e, kind) do { \ 705 (dst_e)->kind += READ_ONCE((src_e)->kind); \ 706 } while (0) 707 708 /** 709 * scx_dump_event - Dump an event 'kind' in 'events' to 's' 710 * @s: output seq_buf 711 * @events: event stats 712 * @kind: the kind of event to dump 713 */ 714 #define scx_dump_event(s, events, kind) do { \ 715 dump_line(&(s), "%40s: %16lld", #kind, (events)->kind); \ 716 } while (0) 717 718 719 static void scx_read_events(struct scx_sched *sch, 720 struct scx_event_stats *events); 721 722 static enum scx_enable_state scx_enable_state(void) 723 { 724 return atomic_read(&scx_enable_state_var); 725 } 726 727 static enum scx_enable_state scx_set_enable_state(enum scx_enable_state to) 728 { 729 return atomic_xchg(&scx_enable_state_var, to); 730 } 731 732 static bool scx_tryset_enable_state(enum scx_enable_state to, 733 enum scx_enable_state from) 734 { 735 int from_v = from; 736 737 return atomic_try_cmpxchg(&scx_enable_state_var, &from_v, to); 738 } 739 740 /** 741 * wait_ops_state - Busy-wait for the specified ops state to end 742 * @p: target task 743 * @opss: state to wait the end of 744 * 745 * Busy-wait for @p to transition out of @opss. This can only be used when the 746 * state part of @opss is %SCX_QUEUEING or %SCX_DISPATCHING. This function also 747 * has load_acquire semantics to ensure that the caller can see the updates made 748 * in the enqueueing and dispatching paths. 749 */ 750 static void wait_ops_state(struct task_struct *p, unsigned long opss) 751 { 752 do { 753 cpu_relax(); 754 } while (atomic_long_read_acquire(&p->scx.ops_state) == opss); 755 } 756 757 static inline bool __cpu_valid(s32 cpu) 758 { 759 return likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu)); 760 } 761 762 /** 763 * ops_cpu_valid - Verify a cpu number, to be used on ops input args 764 * @sch: scx_sched to abort on error 765 * @cpu: cpu number which came from a BPF ops 766 * @where: extra information reported on error 767 * 768 * @cpu is a cpu number which came from the BPF scheduler and can be any value. 769 * Verify that it is in range and one of the possible CPUs. If invalid, trigger 770 * an ops error. 771 */ 772 static bool ops_cpu_valid(struct scx_sched *sch, s32 cpu, const char *where) 773 { 774 if (__cpu_valid(cpu)) { 775 return true; 776 } else { 777 scx_error(sch, "invalid CPU %d%s%s", cpu, where ? " " : "", where ?: ""); 778 return false; 779 } 780 } 781 782 /** 783 * ops_sanitize_err - Sanitize a -errno value 784 * @sch: scx_sched to error out on error 785 * @ops_name: operation to blame on failure 786 * @err: -errno value to sanitize 787 * 788 * Verify @err is a valid -errno. If not, trigger scx_error() and return 789 * -%EPROTO. This is necessary because returning a rogue -errno up the chain can 790 * cause misbehaviors. For example, a large negative return from 791 * ops.init_task() triggers an oops when passed up the call chain because the 792 * value fails the IS_ERR() test after being encoded with ERR_PTR() and then is 793 * handled as a pointer.
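 *
 * A sketch of the intended call-site shape (illustrative; @init_args stands
 * in for whatever argument struct the operation takes):
 *
 *	s32 ret;
 *
 *	ret = SCX_CALL_OP_RET(sch, 0, init_task, NULL, p, &init_args);
 *	if (unlikely(ret))
 *		ret = ops_sanitize_err(sch, "init_task", ret);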
794 */ 795 static int ops_sanitize_err(struct scx_sched *sch, const char *ops_name, s32 err) 796 { 797 if (err < 0 && err >= -MAX_ERRNO) 798 return err; 799 800 scx_error(sch, "ops.%s() returned an invalid errno %d", ops_name, err); 801 return -EPROTO; 802 } 803 804 static void run_deferred(struct rq *rq) 805 { 806 process_ddsp_deferred_locals(rq); 807 808 if (local_read(&rq->scx.reenq_local_deferred)) { 809 local_set(&rq->scx.reenq_local_deferred, 0); 810 reenq_local(rq); 811 } 812 } 813 814 static void deferred_bal_cb_workfn(struct rq *rq) 815 { 816 run_deferred(rq); 817 } 818 819 static void deferred_irq_workfn(struct irq_work *irq_work) 820 { 821 struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work); 822 823 raw_spin_rq_lock(rq); 824 run_deferred(rq); 825 raw_spin_rq_unlock(rq); 826 } 827 828 /** 829 * schedule_deferred - Schedule execution of deferred actions on an rq 830 * @rq: target rq 831 * 832 * Schedule execution of deferred actions on @rq. Deferred actions are executed 833 * with @rq locked but unpinned, and thus can unlock @rq to e.g. migrate tasks 834 * to other rqs. 835 */ 836 static void schedule_deferred(struct rq *rq) 837 { 838 /* 839 * Queue an irq work. They are executed on IRQ re-enable which may take 840 * a bit longer than the scheduler hook in schedule_deferred_locked(). 841 */ 842 irq_work_queue(&rq->scx.deferred_irq_work); 843 } 844 845 /** 846 * schedule_deferred_locked - Schedule execution of deferred actions on an rq 847 * @rq: target rq 848 * 849 * Schedule execution of deferred actions on @rq. Equivalent to 850 * schedule_deferred() but requires @rq to be locked and can be more efficient. 851 */ 852 static void schedule_deferred_locked(struct rq *rq) 853 { 854 lockdep_assert_rq_held(rq); 855 856 /* 857 * If in the middle of waking up a task, task_woken_scx() will be called 858 * afterwards which will then run the deferred actions, no need to 859 * schedule anything. 860 */ 861 if (rq->scx.flags & SCX_RQ_IN_WAKEUP) 862 return; 863 864 /* Don't do anything if there already is a deferred operation. */ 865 if (rq->scx.flags & SCX_RQ_BAL_CB_PENDING) 866 return; 867 868 /* 869 * If in balance, the balance callbacks will be called before rq lock is 870 * released. Schedule one. 871 * 872 * 873 * We can't directly insert the callback into the 874 * rq's list: The call can drop its lock and make the pending balance 875 * callback visible to unrelated code paths that call rq_pin_lock(). 876 * 877 * Just let balance_one() know that it must do it itself. 878 */ 879 if (rq->scx.flags & SCX_RQ_IN_BALANCE) { 880 rq->scx.flags |= SCX_RQ_BAL_CB_PENDING; 881 return; 882 } 883 884 /* 885 * No scheduler hooks available. Use the generic irq_work path. The 886 * above WAKEUP and BALANCE paths should cover most of the cases and the 887 * time to IRQ re-enable shouldn't be long. 888 */ 889 schedule_deferred(rq); 890 } 891 892 /** 893 * touch_core_sched - Update timestamp used for core-sched task ordering 894 * @rq: rq to read clock from, must be locked 895 * @p: task to update the timestamp for 896 * 897 * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to 898 * implement global or local-DSQ FIFO ordering for core-sched. Should be called 899 * when a task becomes runnable and its turn on the CPU ends (e.g. slice 900 * exhaustion). 901 */ 902 static void touch_core_sched(struct rq *rq, struct task_struct *p) 903 { 904 lockdep_assert_rq_held(rq); 905 906 #ifdef CONFIG_SCHED_CORE 907 /* 908 * It's okay to update the timestamp spuriously. 
Use 909 * sched_core_disabled() which is cheaper than enabled(). 910 * 911 * As this is used to determine ordering between tasks of sibling CPUs, 912 * it may be better to use per-core dispatch sequence instead. 913 */ 914 if (!sched_core_disabled()) 915 p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq)); 916 #endif 917 } 918 919 /** 920 * touch_core_sched_dispatch - Update core-sched timestamp on dispatch 921 * @rq: rq to read clock from, must be locked 922 * @p: task being dispatched 923 * 924 * If the BPF scheduler implements custom core-sched ordering via 925 * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO 926 * ordering within each local DSQ. This function is called from dispatch paths 927 * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect. 928 */ 929 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p) 930 { 931 lockdep_assert_rq_held(rq); 932 933 #ifdef CONFIG_SCHED_CORE 934 if (unlikely(SCX_HAS_OP(scx_root, core_sched_before))) 935 touch_core_sched(rq, p); 936 #endif 937 } 938 939 static void update_curr_scx(struct rq *rq) 940 { 941 struct task_struct *curr = rq->curr; 942 s64 delta_exec; 943 944 delta_exec = update_curr_common(rq); 945 if (unlikely(delta_exec <= 0)) 946 return; 947 948 if (curr->scx.slice != SCX_SLICE_INF) { 949 curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec); 950 if (!curr->scx.slice) 951 touch_core_sched(rq, curr); 952 } 953 } 954 955 static bool scx_dsq_priq_less(struct rb_node *node_a, 956 const struct rb_node *node_b) 957 { 958 const struct task_struct *a = 959 container_of(node_a, struct task_struct, scx.dsq_priq); 960 const struct task_struct *b = 961 container_of(node_b, struct task_struct, scx.dsq_priq); 962 963 return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime); 964 } 965 966 static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta) 967 { 968 /* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */ 969 WRITE_ONCE(dsq->nr, dsq->nr + delta); 970 } 971 972 static void refill_task_slice_dfl(struct scx_sched *sch, struct task_struct *p) 973 { 974 p->scx.slice = READ_ONCE(scx_slice_dfl); 975 __scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1); 976 } 977 978 static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq, 979 struct task_struct *p, u64 enq_flags) 980 { 981 bool is_local = dsq->id == SCX_DSQ_LOCAL; 982 983 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node)); 984 WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) || 985 !RB_EMPTY_NODE(&p->scx.dsq_priq)); 986 987 if (!is_local) { 988 raw_spin_lock_nested(&dsq->lock, 989 (enq_flags & SCX_ENQ_NESTED) ? SINGLE_DEPTH_NESTING : 0); 990 991 if (unlikely(dsq->id == SCX_DSQ_INVALID)) { 992 scx_error(sch, "attempting to dispatch to a destroyed dsq"); 993 /* fall back to the global dsq */ 994 raw_spin_unlock(&dsq->lock); 995 dsq = find_global_dsq(sch, p); 996 raw_spin_lock(&dsq->lock); 997 } 998 } 999 1000 if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) && 1001 (enq_flags & SCX_ENQ_DSQ_PRIQ))) { 1002 /* 1003 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from 1004 * their FIFO queues. To avoid confusion and accidentally 1005 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we 1006 * disallow any internal DSQ from doing vtime ordering of 1007 * tasks. 
1008 */ 1009 scx_error(sch, "cannot use vtime ordering for built-in DSQs"); 1010 enq_flags &= ~SCX_ENQ_DSQ_PRIQ; 1011 } 1012 1013 if (enq_flags & SCX_ENQ_DSQ_PRIQ) { 1014 struct rb_node *rbp; 1015 1016 /* 1017 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are 1018 * linked to both the rbtree and list on PRIQs, this can only be 1019 * tested easily when adding the first task. 1020 */ 1021 if (unlikely(RB_EMPTY_ROOT(&dsq->priq) && 1022 nldsq_next_task(dsq, NULL, false))) 1023 scx_error(sch, "DSQ ID 0x%016llx already had FIFO-enqueued tasks", 1024 dsq->id); 1025 1026 p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ; 1027 rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less); 1028 1029 /* 1030 * Find the previous task and insert after it on the list so 1031 * that @dsq->list is vtime ordered. 1032 */ 1033 rbp = rb_prev(&p->scx.dsq_priq); 1034 if (rbp) { 1035 struct task_struct *prev = 1036 container_of(rbp, struct task_struct, 1037 scx.dsq_priq); 1038 list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node); 1039 /* first task unchanged - no update needed */ 1040 } else { 1041 list_add(&p->scx.dsq_list.node, &dsq->list); 1042 /* not builtin and new task is at head - use fastpath */ 1043 rcu_assign_pointer(dsq->first_task, p); 1044 } 1045 } else { 1046 /* a FIFO DSQ shouldn't be using PRIQ enqueuing */ 1047 if (unlikely(!RB_EMPTY_ROOT(&dsq->priq))) 1048 scx_error(sch, "DSQ ID 0x%016llx already had PRIQ-enqueued tasks", 1049 dsq->id); 1050 1051 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) { 1052 list_add(&p->scx.dsq_list.node, &dsq->list); 1053 /* new task inserted at head - use fastpath */ 1054 if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN)) 1055 rcu_assign_pointer(dsq->first_task, p); 1056 } else { 1057 bool was_empty; 1058 1059 was_empty = list_empty(&dsq->list); 1060 list_add_tail(&p->scx.dsq_list.node, &dsq->list); 1061 if (was_empty && !(dsq->id & SCX_DSQ_FLAG_BUILTIN)) 1062 rcu_assign_pointer(dsq->first_task, p); 1063 } 1064 } 1065 1066 /* seq records the order tasks are queued, used by BPF DSQ iterator */ 1067 dsq->seq++; 1068 p->scx.dsq_seq = dsq->seq; 1069 1070 dsq_mod_nr(dsq, 1); 1071 p->scx.dsq = dsq; 1072 1073 /* 1074 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the 1075 * direct dispatch path, but we clear them here because the direct 1076 * dispatch verdict may be overridden on the enqueue path during e.g. 1077 * bypass. 1078 */ 1079 p->scx.ddsp_dsq_id = SCX_DSQ_INVALID; 1080 p->scx.ddsp_enq_flags = 0; 1081 1082 /* 1083 * We're transitioning out of QUEUEING or DISPATCHING. store_release to 1084 * match waiters' load_acquire. 
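 * The waiting side is wait_ops_state().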
1085 */ 1086 if (enq_flags & SCX_ENQ_CLEAR_OPSS) 1087 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); 1088 1089 if (is_local) { 1090 struct rq *rq = container_of(dsq, struct rq, scx.local_dsq); 1091 bool preempt = false; 1092 1093 if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr && 1094 rq->curr->sched_class == &ext_sched_class) { 1095 rq->curr->scx.slice = 0; 1096 preempt = true; 1097 } 1098 1099 if (preempt || sched_class_above(&ext_sched_class, 1100 rq->curr->sched_class)) 1101 resched_curr(rq); 1102 } else { 1103 raw_spin_unlock(&dsq->lock); 1104 } 1105 } 1106 1107 static void task_unlink_from_dsq(struct task_struct *p, 1108 struct scx_dispatch_q *dsq) 1109 { 1110 WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node)); 1111 1112 if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) { 1113 rb_erase(&p->scx.dsq_priq, &dsq->priq); 1114 RB_CLEAR_NODE(&p->scx.dsq_priq); 1115 p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ; 1116 } 1117 1118 list_del_init(&p->scx.dsq_list.node); 1119 dsq_mod_nr(dsq, -1); 1120 1121 if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN) && dsq->first_task == p) { 1122 struct task_struct *first_task; 1123 1124 first_task = nldsq_next_task(dsq, NULL, false); 1125 rcu_assign_pointer(dsq->first_task, first_task); 1126 } 1127 } 1128 1129 static void dispatch_dequeue(struct rq *rq, struct task_struct *p) 1130 { 1131 struct scx_dispatch_q *dsq = p->scx.dsq; 1132 bool is_local = dsq == &rq->scx.local_dsq; 1133 1134 lockdep_assert_rq_held(rq); 1135 1136 if (!dsq) { 1137 /* 1138 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals. 1139 * Unlinking is all that's needed to cancel. 1140 */ 1141 if (unlikely(!list_empty(&p->scx.dsq_list.node))) 1142 list_del_init(&p->scx.dsq_list.node); 1143 1144 /* 1145 * When dispatching directly from the BPF scheduler to a local 1146 * DSQ, the task isn't associated with any DSQ but 1147 * @p->scx.holding_cpu may be set under the protection of 1148 * %SCX_OPSS_DISPATCHING. 1149 */ 1150 if (p->scx.holding_cpu >= 0) 1151 p->scx.holding_cpu = -1; 1152 1153 return; 1154 } 1155 1156 if (!is_local) 1157 raw_spin_lock(&dsq->lock); 1158 1159 /* 1160 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't 1161 * change underneath us. 1162 */ 1163 if (p->scx.holding_cpu < 0) { 1164 /* @p must still be on @dsq, dequeue */ 1165 task_unlink_from_dsq(p, dsq); 1166 } else { 1167 /* 1168 * We're racing against dispatch_to_local_dsq() which already 1169 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the 1170 * holding_cpu which tells dispatch_to_local_dsq() that it lost 1171 * the race. 1172 */ 1173 WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node)); 1174 p->scx.holding_cpu = -1; 1175 } 1176 p->scx.dsq = NULL; 1177 1178 if (!is_local) 1179 raw_spin_unlock(&dsq->lock); 1180 } 1181 1182 /* 1183 * Abbreviated version of dispatch_dequeue() that can be used when both @p's rq 1184 * and dsq are locked. 
1185 */ 1186 static void dispatch_dequeue_locked(struct task_struct *p, 1187 struct scx_dispatch_q *dsq) 1188 { 1189 lockdep_assert_rq_held(task_rq(p)); 1190 lockdep_assert_held(&dsq->lock); 1191 1192 task_unlink_from_dsq(p, dsq); 1193 p->scx.dsq = NULL; 1194 } 1195 1196 static struct scx_dispatch_q *find_dsq_for_dispatch(struct scx_sched *sch, 1197 struct rq *rq, u64 dsq_id, 1198 struct task_struct *p) 1199 { 1200 struct scx_dispatch_q *dsq; 1201 1202 if (dsq_id == SCX_DSQ_LOCAL) 1203 return &rq->scx.local_dsq; 1204 1205 if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) { 1206 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK; 1207 1208 if (!ops_cpu_valid(sch, cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict")) 1209 return find_global_dsq(sch, p); 1210 1211 return &cpu_rq(cpu)->scx.local_dsq; 1212 } 1213 1214 if (dsq_id == SCX_DSQ_GLOBAL) 1215 dsq = find_global_dsq(sch, p); 1216 else 1217 dsq = find_user_dsq(sch, dsq_id); 1218 1219 if (unlikely(!dsq)) { 1220 scx_error(sch, "non-existent DSQ 0x%llx for %s[%d]", 1221 dsq_id, p->comm, p->pid); 1222 return find_global_dsq(sch, p); 1223 } 1224 1225 return dsq; 1226 } 1227 1228 static void mark_direct_dispatch(struct scx_sched *sch, 1229 struct task_struct *ddsp_task, 1230 struct task_struct *p, u64 dsq_id, 1231 u64 enq_flags) 1232 { 1233 /* 1234 * Mark that dispatch already happened from ops.select_cpu() or 1235 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value 1236 * which can never match a valid task pointer. 1237 */ 1238 __this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH)); 1239 1240 /* @p must match the task on the enqueue path */ 1241 if (unlikely(p != ddsp_task)) { 1242 if (IS_ERR(ddsp_task)) 1243 scx_error(sch, "%s[%d] already direct-dispatched", 1244 p->comm, p->pid); 1245 else 1246 scx_error(sch, "scheduling for %s[%d] but trying to direct-dispatch %s[%d]", 1247 ddsp_task->comm, ddsp_task->pid, 1248 p->comm, p->pid); 1249 return; 1250 } 1251 1252 WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID); 1253 WARN_ON_ONCE(p->scx.ddsp_enq_flags); 1254 1255 p->scx.ddsp_dsq_id = dsq_id; 1256 p->scx.ddsp_enq_flags = enq_flags; 1257 } 1258 1259 static void direct_dispatch(struct scx_sched *sch, struct task_struct *p, 1260 u64 enq_flags) 1261 { 1262 struct rq *rq = task_rq(p); 1263 struct scx_dispatch_q *dsq = 1264 find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p); 1265 1266 touch_core_sched_dispatch(rq, p); 1267 1268 p->scx.ddsp_enq_flags |= enq_flags; 1269 1270 /* 1271 * We are in the enqueue path with @rq locked and pinned, and thus can't 1272 * double lock a remote rq and enqueue to its local DSQ. For 1273 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer 1274 * the enqueue so that it's executed when @rq can be unlocked. 1275 */ 1276 if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) { 1277 unsigned long opss; 1278 1279 opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK; 1280 1281 switch (opss & SCX_OPSS_STATE_MASK) { 1282 case SCX_OPSS_NONE: 1283 break; 1284 case SCX_OPSS_QUEUEING: 1285 /* 1286 * As @p was never passed to the BPF side, _release is 1287 * not strictly necessary. Still do it for consistency. 
1288 */ 1289 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); 1290 break; 1291 default: 1292 WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()", 1293 p->comm, p->pid, opss); 1294 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); 1295 break; 1296 } 1297 1298 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node)); 1299 list_add_tail(&p->scx.dsq_list.node, 1300 &rq->scx.ddsp_deferred_locals); 1301 schedule_deferred_locked(rq); 1302 return; 1303 } 1304 1305 dispatch_enqueue(sch, dsq, p, 1306 p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS); 1307 } 1308 1309 static bool scx_rq_online(struct rq *rq) 1310 { 1311 /* 1312 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates 1313 * the online state as seen from the BPF scheduler. cpu_active() test 1314 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will 1315 * stay set until the current scheduling operation is complete even if 1316 * we aren't locking @rq. 1317 */ 1318 return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq))); 1319 } 1320 1321 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags, 1322 int sticky_cpu) 1323 { 1324 struct scx_sched *sch = scx_root; 1325 struct task_struct **ddsp_taskp; 1326 struct scx_dispatch_q *dsq; 1327 unsigned long qseq; 1328 1329 WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED)); 1330 1331 /* rq migration */ 1332 if (sticky_cpu == cpu_of(rq)) 1333 goto local_norefill; 1334 1335 /* 1336 * If !scx_rq_online(), we already told the BPF scheduler that the CPU 1337 * is offline and are just running the hotplug path. Don't bother the 1338 * BPF scheduler. 1339 */ 1340 if (!scx_rq_online(rq)) 1341 goto local; 1342 1343 if (scx_rq_bypassing(rq)) { 1344 __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1); 1345 goto bypass; 1346 } 1347 1348 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID) 1349 goto direct; 1350 1351 /* see %SCX_OPS_ENQ_EXITING */ 1352 if (!(sch->ops.flags & SCX_OPS_ENQ_EXITING) && 1353 unlikely(p->flags & PF_EXITING)) { 1354 __scx_add_event(sch, SCX_EV_ENQ_SKIP_EXITING, 1); 1355 goto local; 1356 } 1357 1358 /* see %SCX_OPS_ENQ_MIGRATION_DISABLED */ 1359 if (!(sch->ops.flags & SCX_OPS_ENQ_MIGRATION_DISABLED) && 1360 is_migration_disabled(p)) { 1361 __scx_add_event(sch, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED, 1); 1362 goto local; 1363 } 1364 1365 if (unlikely(!SCX_HAS_OP(sch, enqueue))) 1366 goto global; 1367 1368 /* DSQ bypass didn't trigger, enqueue on the BPF scheduler */ 1369 qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT; 1370 1371 WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE); 1372 atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq); 1373 1374 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task); 1375 WARN_ON_ONCE(*ddsp_taskp); 1376 *ddsp_taskp = p; 1377 1378 SCX_CALL_OP_TASK(sch, SCX_KF_ENQUEUE, enqueue, rq, p, enq_flags); 1379 1380 *ddsp_taskp = NULL; 1381 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID) 1382 goto direct; 1383 1384 /* 1385 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or 1386 * dequeue may be waiting. The store_release matches their load_acquire. 
1387 */ 1388 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq); 1389 return; 1390 1391 direct: 1392 direct_dispatch(sch, p, enq_flags); 1393 return; 1394 local_norefill: 1395 dispatch_enqueue(sch, &rq->scx.local_dsq, p, enq_flags); 1396 return; 1397 local: 1398 dsq = &rq->scx.local_dsq; 1399 goto enqueue; 1400 global: 1401 dsq = find_global_dsq(sch, p); 1402 goto enqueue; 1403 bypass: 1404 dsq = &task_rq(p)->scx.bypass_dsq; 1405 goto enqueue; 1406 1407 enqueue: 1408 /* 1409 * For task-ordering, slice refill must be treated as implying the end 1410 * of the current slice. Otherwise, the longer @p stays on the CPU, the 1411 * higher priority it becomes from scx_prio_less()'s POV. 1412 */ 1413 touch_core_sched(rq, p); 1414 refill_task_slice_dfl(sch, p); 1415 dispatch_enqueue(sch, dsq, p, enq_flags); 1416 } 1417 1418 static bool task_runnable(const struct task_struct *p) 1419 { 1420 return !list_empty(&p->scx.runnable_node); 1421 } 1422 1423 static void set_task_runnable(struct rq *rq, struct task_struct *p) 1424 { 1425 lockdep_assert_rq_held(rq); 1426 1427 if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) { 1428 p->scx.runnable_at = jiffies; 1429 p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT; 1430 } 1431 1432 /* 1433 * list_add_tail() must be used. scx_bypass() depends on tasks being 1434 * appended to the runnable_list. 1435 */ 1436 list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list); 1437 } 1438 1439 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at) 1440 { 1441 list_del_init(&p->scx.runnable_node); 1442 if (reset_runnable_at) 1443 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT; 1444 } 1445 1446 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags) 1447 { 1448 struct scx_sched *sch = scx_root; 1449 int sticky_cpu = p->scx.sticky_cpu; 1450 1451 if (enq_flags & ENQUEUE_WAKEUP) 1452 rq->scx.flags |= SCX_RQ_IN_WAKEUP; 1453 1454 enq_flags |= rq->scx.extra_enq_flags; 1455 1456 if (sticky_cpu >= 0) 1457 p->scx.sticky_cpu = -1; 1458 1459 /* 1460 * Restoring a running task will be immediately followed by 1461 * set_next_task_scx() which expects the task to not be on the BPF 1462 * scheduler as tasks can only start running through local DSQs. Force 1463 * direct-dispatch into the local DSQ by setting the sticky_cpu. 
1464 */ 1465 if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p)) 1466 sticky_cpu = cpu_of(rq); 1467 1468 if (p->scx.flags & SCX_TASK_QUEUED) { 1469 WARN_ON_ONCE(!task_runnable(p)); 1470 goto out; 1471 } 1472 1473 set_task_runnable(rq, p); 1474 p->scx.flags |= SCX_TASK_QUEUED; 1475 rq->scx.nr_running++; 1476 add_nr_running(rq, 1); 1477 1478 if (SCX_HAS_OP(sch, runnable) && !task_on_rq_migrating(p)) 1479 SCX_CALL_OP_TASK(sch, SCX_KF_REST, runnable, rq, p, enq_flags); 1480 1481 if (enq_flags & SCX_ENQ_WAKEUP) 1482 touch_core_sched(rq, p); 1483 1484 do_enqueue_task(rq, p, enq_flags, sticky_cpu); 1485 out: 1486 rq->scx.flags &= ~SCX_RQ_IN_WAKEUP; 1487 1488 if ((enq_flags & SCX_ENQ_CPU_SELECTED) && 1489 unlikely(cpu_of(rq) != p->scx.selected_cpu)) 1490 __scx_add_event(sch, SCX_EV_SELECT_CPU_FALLBACK, 1); 1491 } 1492 1493 static void ops_dequeue(struct rq *rq, struct task_struct *p, u64 deq_flags) 1494 { 1495 struct scx_sched *sch = scx_root; 1496 unsigned long opss; 1497 1498 /* dequeue is always temporary, don't reset runnable_at */ 1499 clr_task_runnable(p, false); 1500 1501 /* acquire ensures that we see the preceding updates on QUEUED */ 1502 opss = atomic_long_read_acquire(&p->scx.ops_state); 1503 1504 switch (opss & SCX_OPSS_STATE_MASK) { 1505 case SCX_OPSS_NONE: 1506 break; 1507 case SCX_OPSS_QUEUEING: 1508 /* 1509 * QUEUEING is started and finished while holding @p's rq lock. 1510 * As we're holding the rq lock now, we shouldn't see QUEUEING. 1511 */ 1512 BUG(); 1513 case SCX_OPSS_QUEUED: 1514 if (SCX_HAS_OP(sch, dequeue)) 1515 SCX_CALL_OP_TASK(sch, SCX_KF_REST, dequeue, rq, 1516 p, deq_flags); 1517 1518 if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss, 1519 SCX_OPSS_NONE)) 1520 break; 1521 fallthrough; 1522 case SCX_OPSS_DISPATCHING: 1523 /* 1524 * If @p is being dispatched from the BPF scheduler to a DSQ, 1525 * wait for the transfer to complete so that @p doesn't get 1526 * added to its DSQ after dequeueing is complete. 1527 * 1528 * As we're waiting on DISPATCHING with the rq locked, the 1529 * dispatching side shouldn't try to lock the rq while 1530 * DISPATCHING is set. See dispatch_to_local_dsq(). 1531 * 1532 * DISPATCHING shouldn't have qseq set and control can reach 1533 * here with NONE @opss from the above QUEUED case block. 1534 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss. 1535 */ 1536 wait_ops_state(p, SCX_OPSS_DISPATCHING); 1537 BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE); 1538 break; 1539 } 1540 } 1541 1542 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags) 1543 { 1544 struct scx_sched *sch = scx_root; 1545 1546 if (!(p->scx.flags & SCX_TASK_QUEUED)) { 1547 WARN_ON_ONCE(task_runnable(p)); 1548 return true; 1549 } 1550 1551 ops_dequeue(rq, p, deq_flags); 1552 1553 /* 1554 * A currently running task which is going off @rq first gets dequeued 1555 * and then stops running. As we want running <-> stopping transitions 1556 * to be contained within runnable <-> quiescent transitions, trigger 1557 * ->stopping() early here instead of in put_prev_task_scx(). 1558 * 1559 * @p may go through multiple stopping <-> running transitions between 1560 * here and put_prev_task_scx() if task attribute changes occur while 1561 * balance_scx() leaves @rq unlocked. However, they don't contain any 1562 * information meaningful to the BPF scheduler and can be suppressed by 1563 * skipping the callbacks if the task is !QUEUED. 
1564 */ 1565 if (SCX_HAS_OP(sch, stopping) && task_current(rq, p)) { 1566 update_curr_scx(rq); 1567 SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, false); 1568 } 1569 1570 if (SCX_HAS_OP(sch, quiescent) && !task_on_rq_migrating(p)) 1571 SCX_CALL_OP_TASK(sch, SCX_KF_REST, quiescent, rq, p, deq_flags); 1572 1573 if (deq_flags & SCX_DEQ_SLEEP) 1574 p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP; 1575 else 1576 p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP; 1577 1578 p->scx.flags &= ~SCX_TASK_QUEUED; 1579 rq->scx.nr_running--; 1580 sub_nr_running(rq, 1); 1581 1582 dispatch_dequeue(rq, p); 1583 return true; 1584 } 1585 1586 static void yield_task_scx(struct rq *rq) 1587 { 1588 struct scx_sched *sch = scx_root; 1589 struct task_struct *p = rq->donor; 1590 1591 if (SCX_HAS_OP(sch, yield)) 1592 SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq, p, NULL); 1593 else 1594 p->scx.slice = 0; 1595 } 1596 1597 static bool yield_to_task_scx(struct rq *rq, struct task_struct *to) 1598 { 1599 struct scx_sched *sch = scx_root; 1600 struct task_struct *from = rq->donor; 1601 1602 if (SCX_HAS_OP(sch, yield)) 1603 return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq, 1604 from, to); 1605 else 1606 return false; 1607 } 1608 1609 static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags, 1610 struct scx_dispatch_q *src_dsq, 1611 struct rq *dst_rq) 1612 { 1613 struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq; 1614 1615 /* @dsq is locked and @p is on @dst_rq */ 1616 lockdep_assert_held(&src_dsq->lock); 1617 lockdep_assert_rq_held(dst_rq); 1618 1619 WARN_ON_ONCE(p->scx.holding_cpu >= 0); 1620 1621 if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) 1622 list_add(&p->scx.dsq_list.node, &dst_dsq->list); 1623 else 1624 list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list); 1625 1626 dsq_mod_nr(dst_dsq, 1); 1627 p->scx.dsq = dst_dsq; 1628 } 1629 1630 /** 1631 * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ 1632 * @p: task to move 1633 * @enq_flags: %SCX_ENQ_* 1634 * @src_rq: rq to move the task from, locked on entry, released on return 1635 * @dst_rq: rq to move the task into, locked on return 1636 * 1637 * Move @p which is currently on @src_rq to @dst_rq's local DSQ. 1638 */ 1639 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, 1640 struct rq *src_rq, struct rq *dst_rq) 1641 { 1642 lockdep_assert_rq_held(src_rq); 1643 1644 /* the following marks @p MIGRATING which excludes dequeue */ 1645 deactivate_task(src_rq, p, 0); 1646 set_task_cpu(p, cpu_of(dst_rq)); 1647 p->scx.sticky_cpu = cpu_of(dst_rq); 1648 1649 raw_spin_rq_unlock(src_rq); 1650 raw_spin_rq_lock(dst_rq); 1651 1652 /* 1653 * We want to pass scx-specific enq_flags but activate_task() will 1654 * truncate the upper 32 bit. As we own @rq, we can pass them through 1655 * @rq->scx.extra_enq_flags instead. 1656 */ 1657 WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr)); 1658 WARN_ON_ONCE(dst_rq->scx.extra_enq_flags); 1659 dst_rq->scx.extra_enq_flags = enq_flags; 1660 activate_task(dst_rq, p, 0); 1661 dst_rq->scx.extra_enq_flags = 0; 1662 } 1663 1664 /* 1665 * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two 1666 * differences: 1667 * 1668 * - is_cpu_allowed() asks "Can this task run on this CPU?" while 1669 * task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to 1670 * this CPU?". 
1671 * 1672 * While migration is disabled, is_cpu_allowed() has to say "yes" as the task 1673 * must be allowed to finish on the CPU that it's currently on regardless of 1674 * the CPU state. However, task_can_run_on_remote_rq() must say "no" as the 1675 * BPF scheduler shouldn't attempt to migrate a task which has migration 1676 * disabled. 1677 * 1678 * - The BPF scheduler is bypassed while the rq is offline and we can always say 1679 * no to the BPF scheduler initiated migrations while offline. 1680 * 1681 * The caller must ensure that @p and @rq are on different CPUs. 1682 */ 1683 static bool task_can_run_on_remote_rq(struct scx_sched *sch, 1684 struct task_struct *p, struct rq *rq, 1685 bool enforce) 1686 { 1687 int cpu = cpu_of(rq); 1688 1689 WARN_ON_ONCE(task_cpu(p) == cpu); 1690 1691 /* 1692 * If @p has migration disabled, @p->cpus_ptr is updated to contain only 1693 * the pinned CPU in migrate_disable_switch() while @p is being switched 1694 * out. However, put_prev_task_scx() is called before @p->cpus_ptr is 1695 * updated and thus another CPU may see @p on a DSQ inbetween leading to 1696 * @p passing the below task_allowed_on_cpu() check while migration is 1697 * disabled. 1698 * 1699 * Test the migration disabled state first as the race window is narrow 1700 * and the BPF scheduler failing to check migration disabled state can 1701 * easily be masked if task_allowed_on_cpu() is done first. 1702 */ 1703 if (unlikely(is_migration_disabled(p))) { 1704 if (enforce) 1705 scx_error(sch, "SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d", 1706 p->comm, p->pid, task_cpu(p), cpu); 1707 return false; 1708 } 1709 1710 /* 1711 * We don't require the BPF scheduler to avoid dispatching to offline 1712 * CPUs mostly for convenience but also because CPUs can go offline 1713 * between scx_bpf_dsq_insert() calls and here. Trigger error iff the 1714 * picked CPU is outside the allowed mask. 1715 */ 1716 if (!task_allowed_on_cpu(p, cpu)) { 1717 if (enforce) 1718 scx_error(sch, "SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]", 1719 cpu, p->comm, p->pid); 1720 return false; 1721 } 1722 1723 if (!scx_rq_online(rq)) { 1724 if (enforce) 1725 __scx_add_event(sch, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE, 1); 1726 return false; 1727 } 1728 1729 return true; 1730 } 1731 1732 /** 1733 * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq 1734 * @p: target task 1735 * @dsq: locked DSQ @p is currently on 1736 * @src_rq: rq @p is currently on, stable with @dsq locked 1737 * 1738 * Called with @dsq locked but no rq's locked. We want to move @p to a different 1739 * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is 1740 * required when transferring into a local DSQ. Even when transferring into a 1741 * non-local DSQ, it's better to use the same mechanism to protect against 1742 * dequeues and maintain the invariant that @p->scx.dsq can only change while 1743 * @src_rq is locked, which e.g. scx_dump_task() depends on. 1744 * 1745 * We want to grab @src_rq but that can deadlock if we try while locking @dsq, 1746 * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As 1747 * this may race with dequeue, which can't drop the rq lock or fail, do a little 1748 * dancing from our side. 1749 * 1750 * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets 1751 * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu 1752 * would be cleared to -1. 
While other cpus may have updated it to different 1753 * values afterwards, as this operation can't be preempted or recurse, the 1754 * holding_cpu can never become this CPU again before we're done. Thus, we can 1755 * tell whether we lost to dequeue by testing whether the holding_cpu still 1756 * points to this CPU. See dispatch_dequeue() for the counterpart. 1757 * 1758 * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is 1759 * still valid. %false if lost to dequeue. 1760 */ 1761 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p, 1762 struct scx_dispatch_q *dsq, 1763 struct rq *src_rq) 1764 { 1765 s32 cpu = raw_smp_processor_id(); 1766 1767 lockdep_assert_held(&dsq->lock); 1768 1769 WARN_ON_ONCE(p->scx.holding_cpu >= 0); 1770 task_unlink_from_dsq(p, dsq); 1771 p->scx.holding_cpu = cpu; 1772 1773 raw_spin_unlock(&dsq->lock); 1774 raw_spin_rq_lock(src_rq); 1775 1776 /* task_rq couldn't have changed if we're still the holding cpu */ 1777 return likely(p->scx.holding_cpu == cpu) && 1778 !WARN_ON_ONCE(src_rq != task_rq(p)); 1779 } 1780 1781 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p, 1782 struct scx_dispatch_q *dsq, struct rq *src_rq) 1783 { 1784 raw_spin_rq_unlock(this_rq); 1785 1786 if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) { 1787 move_remote_task_to_local_dsq(p, 0, src_rq, this_rq); 1788 return true; 1789 } else { 1790 raw_spin_rq_unlock(src_rq); 1791 raw_spin_rq_lock(this_rq); 1792 return false; 1793 } 1794 } 1795 1796 /** 1797 * move_task_between_dsqs() - Move a task from one DSQ to another 1798 * @sch: scx_sched being operated on 1799 * @p: target task 1800 * @enq_flags: %SCX_ENQ_* 1801 * @src_dsq: DSQ @p is currently on, must not be a local DSQ 1802 * @dst_dsq: DSQ @p is being moved to, can be any DSQ 1803 * 1804 * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local 1805 * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq 1806 * will change. As @p's task_rq is locked, this function doesn't need to use the 1807 * holding_cpu mechanism. 1808 * 1809 * On return, @src_dsq is unlocked and only @p's new task_rq, which is the 1810 * return value, is locked. 1811 */ 1812 static struct rq *move_task_between_dsqs(struct scx_sched *sch, 1813 struct task_struct *p, u64 enq_flags, 1814 struct scx_dispatch_q *src_dsq, 1815 struct scx_dispatch_q *dst_dsq) 1816 { 1817 struct rq *src_rq = task_rq(p), *dst_rq; 1818 1819 BUG_ON(src_dsq->id == SCX_DSQ_LOCAL); 1820 lockdep_assert_held(&src_dsq->lock); 1821 lockdep_assert_rq_held(src_rq); 1822 1823 if (dst_dsq->id == SCX_DSQ_LOCAL) { 1824 dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq); 1825 if (src_rq != dst_rq && 1826 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) { 1827 dst_dsq = find_global_dsq(sch, p); 1828 dst_rq = src_rq; 1829 } 1830 } else { 1831 /* no need to migrate if destination is a non-local DSQ */ 1832 dst_rq = src_rq; 1833 } 1834 1835 /* 1836 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different 1837 * CPU, @p will be migrated. 
1838 */ 1839 if (dst_dsq->id == SCX_DSQ_LOCAL) { 1840 /* @p is going from a non-local DSQ to a local DSQ */ 1841 if (src_rq == dst_rq) { 1842 task_unlink_from_dsq(p, src_dsq); 1843 move_local_task_to_local_dsq(p, enq_flags, 1844 src_dsq, dst_rq); 1845 raw_spin_unlock(&src_dsq->lock); 1846 } else { 1847 raw_spin_unlock(&src_dsq->lock); 1848 move_remote_task_to_local_dsq(p, enq_flags, 1849 src_rq, dst_rq); 1850 } 1851 } else { 1852 /* 1853 * @p is going from a non-local DSQ to a non-local DSQ. As 1854 * $src_dsq is already locked, do an abbreviated dequeue. 1855 */ 1856 dispatch_dequeue_locked(p, src_dsq); 1857 raw_spin_unlock(&src_dsq->lock); 1858 1859 dispatch_enqueue(sch, dst_dsq, p, enq_flags); 1860 } 1861 1862 return dst_rq; 1863 } 1864 1865 static bool consume_dispatch_q(struct scx_sched *sch, struct rq *rq, 1866 struct scx_dispatch_q *dsq) 1867 { 1868 struct task_struct *p; 1869 retry: 1870 /* 1871 * The caller can't expect to successfully consume a task if the task's 1872 * addition to @dsq isn't guaranteed to be visible somehow. Test 1873 * @dsq->list without locking and skip if it seems empty. 1874 */ 1875 if (list_empty(&dsq->list)) 1876 return false; 1877 1878 raw_spin_lock(&dsq->lock); 1879 1880 nldsq_for_each_task(p, dsq) { 1881 struct rq *task_rq = task_rq(p); 1882 1883 /* 1884 * This loop can lead to multiple lockup scenarios, e.g. the BPF 1885 * scheduler can put an enormous number of affinitized tasks into 1886 * a contended DSQ, or the outer retry loop can repeatedly race 1887 * against scx_bypass() dequeueing tasks from @dsq trying to put 1888 * the system into the bypass mode. This can easily live-lock the 1889 * machine. If aborting, exit from all non-bypass DSQs. 1890 */ 1891 if (unlikely(READ_ONCE(scx_aborting)) && dsq->id != SCX_DSQ_BYPASS) 1892 break; 1893 1894 if (rq == task_rq) { 1895 task_unlink_from_dsq(p, dsq); 1896 move_local_task_to_local_dsq(p, 0, dsq, rq); 1897 raw_spin_unlock(&dsq->lock); 1898 return true; 1899 } 1900 1901 if (task_can_run_on_remote_rq(sch, p, rq, false)) { 1902 if (likely(consume_remote_task(rq, p, dsq, task_rq))) 1903 return true; 1904 goto retry; 1905 } 1906 } 1907 1908 raw_spin_unlock(&dsq->lock); 1909 return false; 1910 } 1911 1912 static bool consume_global_dsq(struct scx_sched *sch, struct rq *rq) 1913 { 1914 int node = cpu_to_node(cpu_of(rq)); 1915 1916 return consume_dispatch_q(sch, rq, sch->global_dsqs[node]); 1917 } 1918 1919 /** 1920 * dispatch_to_local_dsq - Dispatch a task to a local dsq 1921 * @sch: scx_sched being operated on 1922 * @rq: current rq which is locked 1923 * @dst_dsq: destination DSQ 1924 * @p: task to dispatch 1925 * @enq_flags: %SCX_ENQ_* 1926 * 1927 * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local 1928 * DSQ. This function performs all the synchronization dancing needed because 1929 * local DSQs are protected with rq locks. 1930 * 1931 * The caller must have exclusive ownership of @p (e.g. through 1932 * %SCX_OPSS_DISPATCHING). 1933 */ 1934 static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq, 1935 struct scx_dispatch_q *dst_dsq, 1936 struct task_struct *p, u64 enq_flags) 1937 { 1938 struct rq *src_rq = task_rq(p); 1939 struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq); 1940 struct rq *locked_rq = rq; 1941 1942 /* 1943 * We're synchronized against dequeue through DISPATCHING. As @p can't 1944 * be dequeued, its task_rq and cpus_allowed are stable too. 1945 * 1946 * If dispatching to @rq that @p is already on, no lock dancing needed. 
1947 */ 1948 if (rq == src_rq && rq == dst_rq) { 1949 dispatch_enqueue(sch, dst_dsq, p, 1950 enq_flags | SCX_ENQ_CLEAR_OPSS); 1951 return; 1952 } 1953 1954 if (src_rq != dst_rq && 1955 unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) { 1956 dispatch_enqueue(sch, find_global_dsq(sch, p), p, 1957 enq_flags | SCX_ENQ_CLEAR_OPSS); 1958 return; 1959 } 1960 1961 /* 1962 * @p is on a possibly remote @src_rq which we need to lock to move the 1963 * task. If dequeue is in progress, it'd be locking @src_rq and waiting 1964 * on DISPATCHING, so we can't grab @src_rq lock while holding 1965 * DISPATCHING. 1966 * 1967 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that 1968 * we're moving from a DSQ and use the same mechanism - mark the task 1969 * under transfer with holding_cpu, release DISPATCHING and then follow 1970 * the same protocol. See unlink_dsq_and_lock_src_rq(). 1971 */ 1972 p->scx.holding_cpu = raw_smp_processor_id(); 1973 1974 /* store_release ensures that dequeue sees the above */ 1975 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); 1976 1977 /* switch to @src_rq lock */ 1978 if (locked_rq != src_rq) { 1979 raw_spin_rq_unlock(locked_rq); 1980 locked_rq = src_rq; 1981 raw_spin_rq_lock(src_rq); 1982 } 1983 1984 /* task_rq couldn't have changed if we're still the holding cpu */ 1985 if (likely(p->scx.holding_cpu == raw_smp_processor_id()) && 1986 !WARN_ON_ONCE(src_rq != task_rq(p))) { 1987 /* 1988 * If @p is staying on the same rq, there's no need to go 1989 * through the full deactivate/activate cycle. Optimize by 1990 * abbreviating move_remote_task_to_local_dsq(). 1991 */ 1992 if (src_rq == dst_rq) { 1993 p->scx.holding_cpu = -1; 1994 dispatch_enqueue(sch, &dst_rq->scx.local_dsq, p, 1995 enq_flags); 1996 } else { 1997 move_remote_task_to_local_dsq(p, enq_flags, 1998 src_rq, dst_rq); 1999 /* task has been moved to dst_rq, which is now locked */ 2000 locked_rq = dst_rq; 2001 } 2002 2003 /* if the destination CPU is idle, wake it up */ 2004 if (sched_class_above(p->sched_class, dst_rq->curr->sched_class)) 2005 resched_curr(dst_rq); 2006 } 2007 2008 /* switch back to @rq lock */ 2009 if (locked_rq != rq) { 2010 raw_spin_rq_unlock(locked_rq); 2011 raw_spin_rq_lock(rq); 2012 } 2013 } 2014 2015 /** 2016 * finish_dispatch - Asynchronously finish dispatching a task 2017 * @rq: current rq which is locked 2018 * @p: task to finish dispatching 2019 * @qseq_at_dispatch: qseq when @p started getting dispatched 2020 * @dsq_id: destination DSQ ID 2021 * @enq_flags: %SCX_ENQ_* 2022 * 2023 * Dispatching to local DSQs may need to wait for queueing to complete or 2024 * require rq lock dancing. As we don't wanna do either while inside 2025 * ops.dispatch() to avoid locking order inversion, we split dispatching into 2026 * two parts. scx_bpf_dsq_insert() which is called by ops.dispatch() records the 2027 * task and its qseq. Once ops.dispatch() returns, this function is called to 2028 * finish up. 2029 * 2030 * There is no guarantee that @p is still valid for dispatching or even that it 2031 * was valid in the first place. Make sure that the task is still owned by the 2032 * BPF scheduler and claim the ownership before dispatching. 2033 */ 2034 static void finish_dispatch(struct scx_sched *sch, struct rq *rq, 2035 struct task_struct *p, 2036 unsigned long qseq_at_dispatch, 2037 u64 dsq_id, u64 enq_flags) 2038 { 2039 struct scx_dispatch_q *dsq; 2040 unsigned long opss; 2041 2042 touch_core_sched_dispatch(rq, p); 2043 retry: 2044 /* 2045 * No need for _acquire here. 
@p is accessed only after a successful 2046 * try_cmpxchg to DISPATCHING. 2047 */ 2048 opss = atomic_long_read(&p->scx.ops_state); 2049 2050 switch (opss & SCX_OPSS_STATE_MASK) { 2051 case SCX_OPSS_DISPATCHING: 2052 case SCX_OPSS_NONE: 2053 /* someone else already got to it */ 2054 return; 2055 case SCX_OPSS_QUEUED: 2056 /* 2057 * If qseq doesn't match, @p has gone through at least one 2058 * dispatch/dequeue and re-enqueue cycle between 2059 * scx_bpf_dsq_insert() and here and we have no claim on it. 2060 */ 2061 if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch) 2062 return; 2063 2064 /* 2065 * While we know @p is accessible, we don't yet have a claim on 2066 * it - the BPF scheduler is allowed to dispatch tasks 2067 * spuriously and there can be a racing dequeue attempt. Let's 2068 * claim @p by atomically transitioning it from QUEUED to 2069 * DISPATCHING. 2070 */ 2071 if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss, 2072 SCX_OPSS_DISPATCHING))) 2073 break; 2074 goto retry; 2075 case SCX_OPSS_QUEUEING: 2076 /* 2077 * do_enqueue_task() is in the process of transferring the task 2078 * to the BPF scheduler while holding @p's rq lock. As we aren't 2079 * holding any kernel or BPF resource that the enqueue path may 2080 * depend upon, it's safe to wait. 2081 */ 2082 wait_ops_state(p, opss); 2083 goto retry; 2084 } 2085 2086 BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED)); 2087 2088 dsq = find_dsq_for_dispatch(sch, this_rq(), dsq_id, p); 2089 2090 if (dsq->id == SCX_DSQ_LOCAL) 2091 dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags); 2092 else 2093 dispatch_enqueue(sch, dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS); 2094 } 2095 2096 static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq) 2097 { 2098 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 2099 u32 u; 2100 2101 for (u = 0; u < dspc->cursor; u++) { 2102 struct scx_dsp_buf_ent *ent = &dspc->buf[u]; 2103 2104 finish_dispatch(sch, rq, ent->task, ent->qseq, ent->dsq_id, 2105 ent->enq_flags); 2106 } 2107 2108 dspc->nr_tasks += dspc->cursor; 2109 dspc->cursor = 0; 2110 } 2111 2112 static inline void maybe_queue_balance_callback(struct rq *rq) 2113 { 2114 lockdep_assert_rq_held(rq); 2115 2116 if (!(rq->scx.flags & SCX_RQ_BAL_CB_PENDING)) 2117 return; 2118 2119 queue_balance_callback(rq, &rq->scx.deferred_bal_cb, 2120 deferred_bal_cb_workfn); 2121 2122 rq->scx.flags &= ~SCX_RQ_BAL_CB_PENDING; 2123 } 2124 2125 static int balance_one(struct rq *rq, struct task_struct *prev) 2126 { 2127 struct scx_sched *sch = scx_root; 2128 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 2129 bool prev_on_scx = prev->sched_class == &ext_sched_class; 2130 bool prev_on_rq = prev->scx.flags & SCX_TASK_QUEUED; 2131 int nr_loops = SCX_DSP_MAX_LOOPS; 2132 2133 lockdep_assert_rq_held(rq); 2134 rq->scx.flags |= SCX_RQ_IN_BALANCE; 2135 rq->scx.flags &= ~SCX_RQ_BAL_KEEP; 2136 2137 if ((sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT) && 2138 unlikely(rq->scx.cpu_released)) { 2139 /* 2140 * If the previous sched_class for the current CPU was not SCX, 2141 * notify the BPF scheduler that it again has control of the 2142 * core. This callback complements ->cpu_release(), which is 2143 * emitted in switch_class(). 
2144 */ 2145 if (SCX_HAS_OP(sch, cpu_acquire)) 2146 SCX_CALL_OP(sch, SCX_KF_REST, cpu_acquire, rq, 2147 cpu_of(rq), NULL); 2148 rq->scx.cpu_released = false; 2149 } 2150 2151 if (prev_on_scx) { 2152 update_curr_scx(rq); 2153 2154 /* 2155 * If @prev is runnable & has slice left, it has priority and 2156 * fetching more just increases latency for the fetched tasks. 2157 * Tell pick_task_scx() to keep running @prev. If the BPF 2158 * scheduler wants to handle this explicitly, it should 2159 * implement ->cpu_release(). 2160 * 2161 * See scx_disable_workfn() for the explanation on the bypassing 2162 * test. 2163 */ 2164 if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) { 2165 rq->scx.flags |= SCX_RQ_BAL_KEEP; 2166 goto has_tasks; 2167 } 2168 } 2169 2170 /* if there already are tasks to run, nothing to do */ 2171 if (rq->scx.local_dsq.nr) 2172 goto has_tasks; 2173 2174 if (consume_global_dsq(sch, rq)) 2175 goto has_tasks; 2176 2177 if (scx_rq_bypassing(rq)) { 2178 if (consume_dispatch_q(sch, rq, &rq->scx.bypass_dsq)) 2179 goto has_tasks; 2180 else 2181 goto no_tasks; 2182 } 2183 2184 if (unlikely(!SCX_HAS_OP(sch, dispatch)) || !scx_rq_online(rq)) 2185 goto no_tasks; 2186 2187 dspc->rq = rq; 2188 2189 /* 2190 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock, 2191 * the local DSQ might still end up empty after a successful 2192 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch() 2193 * produced some tasks, retry. The BPF scheduler may depend on this 2194 * looping behavior to simplify its implementation. 2195 */ 2196 do { 2197 dspc->nr_tasks = 0; 2198 2199 SCX_CALL_OP(sch, SCX_KF_DISPATCH, dispatch, rq, 2200 cpu_of(rq), prev_on_scx ? prev : NULL); 2201 2202 flush_dispatch_buf(sch, rq); 2203 2204 if (prev_on_rq && prev->scx.slice) { 2205 rq->scx.flags |= SCX_RQ_BAL_KEEP; 2206 goto has_tasks; 2207 } 2208 if (rq->scx.local_dsq.nr) 2209 goto has_tasks; 2210 if (consume_global_dsq(sch, rq)) 2211 goto has_tasks; 2212 2213 /* 2214 * ops.dispatch() can trap us in this loop by repeatedly 2215 * dispatching ineligible tasks. Break out once in a while to 2216 * allow the watchdog to run. As IRQ can't be enabled in 2217 * balance(), we want to complete this scheduling cycle and then 2218 * start a new one. IOW, we want to call resched_curr() on the 2219 * next, most likely idle, task, not the current one. Use 2220 * scx_kick_cpu() for deferred kicking. 2221 */ 2222 if (unlikely(!--nr_loops)) { 2223 scx_kick_cpu(sch, cpu_of(rq), 0); 2224 break; 2225 } 2226 } while (dspc->nr_tasks); 2227 2228 no_tasks: 2229 /* 2230 * Didn't find another task to run. Keep running @prev unless 2231 * %SCX_OPS_ENQ_LAST is in effect. 2232 */ 2233 if (prev_on_rq && 2234 (!(sch->ops.flags & SCX_OPS_ENQ_LAST) || scx_rq_bypassing(rq))) { 2235 rq->scx.flags |= SCX_RQ_BAL_KEEP; 2236 __scx_add_event(sch, SCX_EV_DISPATCH_KEEP_LAST, 1); 2237 goto has_tasks; 2238 } 2239 rq->scx.flags &= ~SCX_RQ_IN_BALANCE; 2240 return false; 2241 2242 has_tasks: 2243 rq->scx.flags &= ~SCX_RQ_IN_BALANCE; 2244 return true; 2245 } 2246 2247 static void process_ddsp_deferred_locals(struct rq *rq) 2248 { 2249 struct task_struct *p; 2250 2251 lockdep_assert_rq_held(rq); 2252 2253 /* 2254 * Now that @rq can be unlocked, execute the deferred enqueueing of 2255 * tasks directly dispatched to the local DSQs of other CPUs. See 2256 * direct_dispatch(). Keep popping from the head instead of using 2257 * list_for_each_entry_safe() as dispatch_local_dsq() may unlock @rq 2258 * temporarily. 
2259 */ 2260 while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals, 2261 struct task_struct, scx.dsq_list.node))) { 2262 struct scx_sched *sch = scx_root; 2263 struct scx_dispatch_q *dsq; 2264 2265 list_del_init(&p->scx.dsq_list.node); 2266 2267 dsq = find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p); 2268 if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL)) 2269 dispatch_to_local_dsq(sch, rq, dsq, p, 2270 p->scx.ddsp_enq_flags); 2271 } 2272 } 2273 2274 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first) 2275 { 2276 struct scx_sched *sch = scx_root; 2277 2278 if (p->scx.flags & SCX_TASK_QUEUED) { 2279 /* 2280 * Core-sched might decide to execute @p before it is 2281 * dispatched. Call ops_dequeue() to notify the BPF scheduler. 2282 */ 2283 ops_dequeue(rq, p, SCX_DEQ_CORE_SCHED_EXEC); 2284 dispatch_dequeue(rq, p); 2285 } 2286 2287 p->se.exec_start = rq_clock_task(rq); 2288 2289 /* see dequeue_task_scx() on why we skip when !QUEUED */ 2290 if (SCX_HAS_OP(sch, running) && (p->scx.flags & SCX_TASK_QUEUED)) 2291 SCX_CALL_OP_TASK(sch, SCX_KF_REST, running, rq, p); 2292 2293 clr_task_runnable(p, true); 2294 2295 /* 2296 * @p is getting newly scheduled or got kicked after someone updated its 2297 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick(). 2298 */ 2299 if ((p->scx.slice == SCX_SLICE_INF) != 2300 (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) { 2301 if (p->scx.slice == SCX_SLICE_INF) 2302 rq->scx.flags |= SCX_RQ_CAN_STOP_TICK; 2303 else 2304 rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK; 2305 2306 sched_update_tick_dependency(rq); 2307 2308 /* 2309 * For now, let's refresh the load_avgs just when transitioning 2310 * in and out of nohz. In the future, we might want to add a 2311 * mechanism which calls the following periodically on 2312 * tick-stopped CPUs. 2313 */ 2314 update_other_load_avgs(rq); 2315 } 2316 } 2317 2318 static enum scx_cpu_preempt_reason 2319 preempt_reason_from_class(const struct sched_class *class) 2320 { 2321 if (class == &stop_sched_class) 2322 return SCX_CPU_PREEMPT_STOP; 2323 if (class == &dl_sched_class) 2324 return SCX_CPU_PREEMPT_DL; 2325 if (class == &rt_sched_class) 2326 return SCX_CPU_PREEMPT_RT; 2327 return SCX_CPU_PREEMPT_UNKNOWN; 2328 } 2329 2330 static void switch_class(struct rq *rq, struct task_struct *next) 2331 { 2332 struct scx_sched *sch = scx_root; 2333 const struct sched_class *next_class = next->sched_class; 2334 2335 if (!(sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT)) 2336 return; 2337 2338 /* 2339 * The callback is conceptually meant to convey that the CPU is no 2340 * longer under the control of SCX. Therefore, don't invoke the callback 2341 * if the next class is below SCX (in which case the BPF scheduler has 2342 * actively decided not to schedule any tasks on the CPU). 2343 */ 2344 if (sched_class_above(&ext_sched_class, next_class)) 2345 return; 2346 2347 /* 2348 * At this point we know that SCX was preempted by a higher priority 2349 * sched_class, so invoke the ->cpu_release() callback if we have not 2350 * done so already. We only send the callback once between SCX being 2351 * preempted, and it regaining control of the CPU. 2352 * 2353 * ->cpu_release() complements ->cpu_acquire(), which is emitted the 2354 * next time that balance_scx() is invoked. 
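 *
 * For reference, a BPF scheduler that cares about this event typically hands
 * back any tasks already sitting on this CPU's local DSQ so that they can be
 * re-routed elsewhere. A minimal sketch of such an ops.cpu_release()
 * (illustrative only, scheduler name hypothetical; cf. the in-tree example
 * schedulers):
 *
 *	void BPF_STRUCT_OPS(example_cpu_release, s32 cpu,
 *			    struct scx_cpu_release_args *args)
 *	{
 *		scx_bpf_reenqueue_local();
 *	}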
2355 */ 2356 if (!rq->scx.cpu_released) { 2357 if (SCX_HAS_OP(sch, cpu_release)) { 2358 struct scx_cpu_release_args args = { 2359 .reason = preempt_reason_from_class(next_class), 2360 .task = next, 2361 }; 2362 2363 SCX_CALL_OP(sch, SCX_KF_CPU_RELEASE, cpu_release, rq, 2364 cpu_of(rq), &args); 2365 } 2366 rq->scx.cpu_released = true; 2367 } 2368 } 2369 2370 static void put_prev_task_scx(struct rq *rq, struct task_struct *p, 2371 struct task_struct *next) 2372 { 2373 struct scx_sched *sch = scx_root; 2374 2375 /* see kick_cpus_irq_workfn() */ 2376 smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1); 2377 2378 update_curr_scx(rq); 2379 2380 /* see dequeue_task_scx() on why we skip when !QUEUED */ 2381 if (SCX_HAS_OP(sch, stopping) && (p->scx.flags & SCX_TASK_QUEUED)) 2382 SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, true); 2383 2384 if (p->scx.flags & SCX_TASK_QUEUED) { 2385 set_task_runnable(rq, p); 2386 2387 /* 2388 * If @p has slice left and is being put, @p is getting 2389 * preempted by a higher priority scheduler class or core-sched 2390 * forcing a different task. Leave it at the head of the local 2391 * DSQ. 2392 */ 2393 if (p->scx.slice && !scx_rq_bypassing(rq)) { 2394 dispatch_enqueue(sch, &rq->scx.local_dsq, p, 2395 SCX_ENQ_HEAD); 2396 goto switch_class; 2397 } 2398 2399 /* 2400 * If @p is runnable but we're about to enter a lower 2401 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell 2402 * ops.enqueue() that @p is the only one available for this cpu, 2403 * which should trigger an explicit follow-up scheduling event. 2404 */ 2405 if (sched_class_above(&ext_sched_class, next->sched_class)) { 2406 WARN_ON_ONCE(!(sch->ops.flags & SCX_OPS_ENQ_LAST)); 2407 do_enqueue_task(rq, p, SCX_ENQ_LAST, -1); 2408 } else { 2409 do_enqueue_task(rq, p, 0, -1); 2410 } 2411 } 2412 2413 switch_class: 2414 if (next && next->sched_class != &ext_sched_class) 2415 switch_class(rq, next); 2416 } 2417 2418 static struct task_struct *first_local_task(struct rq *rq) 2419 { 2420 return list_first_entry_or_null(&rq->scx.local_dsq.list, 2421 struct task_struct, scx.dsq_list.node); 2422 } 2423 2424 static struct task_struct * 2425 do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx) 2426 { 2427 struct task_struct *prev = rq->curr; 2428 bool keep_prev, kick_idle = false; 2429 struct task_struct *p; 2430 2431 /* see kick_cpus_irq_workfn() */ 2432 smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1); 2433 2434 rq_modified_clear(rq); 2435 2436 rq_unpin_lock(rq, rf); 2437 balance_one(rq, prev); 2438 rq_repin_lock(rq, rf); 2439 maybe_queue_balance_callback(rq); 2440 2441 /* 2442 * If any higher-priority sched class enqueued a runnable task on 2443 * this rq during balance_one(), abort and return RETRY_TASK, so 2444 * that the scheduler loop can restart. 2445 * 2446 * If @force_scx is true, always try to pick a SCHED_EXT task, 2447 * regardless of any higher-priority sched classes activity. 2448 */ 2449 if (!force_scx && rq_modified_above(rq, &ext_sched_class)) 2450 return RETRY_TASK; 2451 2452 keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP; 2453 if (unlikely(keep_prev && 2454 prev->sched_class != &ext_sched_class)) { 2455 WARN_ON_ONCE(scx_enable_state() == SCX_ENABLED); 2456 keep_prev = false; 2457 } 2458 2459 /* 2460 * If balance_scx() is telling us to keep running @prev, replenish slice 2461 * if necessary and keep running @prev. Otherwise, pop the first one 2462 * from the local DSQ. 
2463 */ 2464 if (keep_prev) { 2465 p = prev; 2466 if (!p->scx.slice) 2467 refill_task_slice_dfl(rcu_dereference_sched(scx_root), p); 2468 } else { 2469 p = first_local_task(rq); 2470 if (!p) { 2471 if (kick_idle) 2472 scx_kick_cpu(rcu_dereference_sched(scx_root), 2473 cpu_of(rq), SCX_KICK_IDLE); 2474 return NULL; 2475 } 2476 2477 if (unlikely(!p->scx.slice)) { 2478 struct scx_sched *sch = rcu_dereference_sched(scx_root); 2479 2480 if (!scx_rq_bypassing(rq) && !sch->warned_zero_slice) { 2481 printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n", 2482 p->comm, p->pid, __func__); 2483 sch->warned_zero_slice = true; 2484 } 2485 refill_task_slice_dfl(sch, p); 2486 } 2487 } 2488 2489 return p; 2490 } 2491 2492 static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf) 2493 { 2494 return do_pick_task_scx(rq, rf, false); 2495 } 2496 2497 #ifdef CONFIG_SCHED_CORE 2498 /** 2499 * scx_prio_less - Task ordering for core-sched 2500 * @a: task A 2501 * @b: task B 2502 * @in_fi: in forced idle state 2503 * 2504 * Core-sched is implemented as an additional scheduling layer on top of the 2505 * usual sched_class'es and needs to find out the expected task ordering. For 2506 * SCX, core-sched calls this function to interrogate the task ordering. 2507 * 2508 * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used 2509 * to implement the default task ordering. The older the timestamp, the higher 2510 * priority the task - the global FIFO ordering matching the default scheduling 2511 * behavior. 2512 * 2513 * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to 2514 * implement FIFO ordering within each local DSQ. See pick_task_scx(). 2515 */ 2516 bool scx_prio_less(const struct task_struct *a, const struct task_struct *b, 2517 bool in_fi) 2518 { 2519 struct scx_sched *sch = scx_root; 2520 2521 /* 2522 * The const qualifiers are dropped from task_struct pointers when 2523 * calling ops.core_sched_before(). Accesses are controlled by the 2524 * verifier. 2525 */ 2526 if (SCX_HAS_OP(sch, core_sched_before) && 2527 !scx_rq_bypassing(task_rq(a))) 2528 return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, core_sched_before, 2529 NULL, 2530 (struct task_struct *)a, 2531 (struct task_struct *)b); 2532 else 2533 return time_after64(a->scx.core_sched_at, b->scx.core_sched_at); 2534 } 2535 #endif /* CONFIG_SCHED_CORE */ 2536 2537 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags) 2538 { 2539 struct scx_sched *sch = scx_root; 2540 bool rq_bypass; 2541 2542 /* 2543 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it 2544 * can be a good migration opportunity with low cache and memory 2545 * footprint. Returning a CPU different than @prev_cpu triggers 2546 * immediate rq migration. However, for SCX, as the current rq 2547 * association doesn't dictate where the task is going to run, this 2548 * doesn't fit well. If necessary, we can later add a dedicated method 2549 * which can decide to preempt self to force it through the regular 2550 * scheduling path. 
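 *
 * For the common wakeup path below, the BPF scheduler's ops.select_cpu() may
 * also directly dispatch @p by inserting it into %SCX_DSQ_LOCAL, in which
 * case ops.enqueue() is skipped (see direct_dispatch()). A minimal sketch of
 * such an implementation (illustrative only, name hypothetical; cf. the
 * in-tree example schedulers):
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle;
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *		if (is_idle)
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}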
2551 */ 2552 if (unlikely(wake_flags & WF_EXEC)) 2553 return prev_cpu; 2554 2555 rq_bypass = scx_rq_bypassing(task_rq(p)); 2556 if (likely(SCX_HAS_OP(sch, select_cpu)) && !rq_bypass) { 2557 s32 cpu; 2558 struct task_struct **ddsp_taskp; 2559 2560 ddsp_taskp = this_cpu_ptr(&direct_dispatch_task); 2561 WARN_ON_ONCE(*ddsp_taskp); 2562 *ddsp_taskp = p; 2563 2564 cpu = SCX_CALL_OP_TASK_RET(sch, 2565 SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU, 2566 select_cpu, NULL, p, prev_cpu, 2567 wake_flags); 2568 p->scx.selected_cpu = cpu; 2569 *ddsp_taskp = NULL; 2570 if (ops_cpu_valid(sch, cpu, "from ops.select_cpu()")) 2571 return cpu; 2572 else 2573 return prev_cpu; 2574 } else { 2575 s32 cpu; 2576 2577 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0); 2578 if (cpu >= 0) { 2579 refill_task_slice_dfl(sch, p); 2580 p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL; 2581 } else { 2582 cpu = prev_cpu; 2583 } 2584 p->scx.selected_cpu = cpu; 2585 2586 if (rq_bypass) 2587 __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1); 2588 return cpu; 2589 } 2590 } 2591 2592 static void task_woken_scx(struct rq *rq, struct task_struct *p) 2593 { 2594 run_deferred(rq); 2595 } 2596 2597 static void set_cpus_allowed_scx(struct task_struct *p, 2598 struct affinity_context *ac) 2599 { 2600 struct scx_sched *sch = scx_root; 2601 2602 set_cpus_allowed_common(p, ac); 2603 2604 /* 2605 * The effective cpumask is stored in @p->cpus_ptr which may temporarily 2606 * differ from the configured one in @p->cpus_mask. Always tell the bpf 2607 * scheduler the effective one. 2608 * 2609 * Fine-grained memory write control is enforced by BPF making the const 2610 * designation pointless. Cast it away when calling the operation. 2611 */ 2612 if (SCX_HAS_OP(sch, set_cpumask)) 2613 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, NULL, 2614 p, (struct cpumask *)p->cpus_ptr); 2615 } 2616 2617 static void handle_hotplug(struct rq *rq, bool online) 2618 { 2619 struct scx_sched *sch = scx_root; 2620 int cpu = cpu_of(rq); 2621 2622 atomic_long_inc(&scx_hotplug_seq); 2623 2624 /* 2625 * scx_root updates are protected by cpus_read_lock() and will stay 2626 * stable here. Note that we can't depend on scx_enabled() test as the 2627 * hotplug ops need to be enabled before __scx_enabled is set. 2628 */ 2629 if (unlikely(!sch)) 2630 return; 2631 2632 if (scx_enabled()) 2633 scx_idle_update_selcpu_topology(&sch->ops); 2634 2635 if (online && SCX_HAS_OP(sch, cpu_online)) 2636 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_online, NULL, cpu); 2637 else if (!online && SCX_HAS_OP(sch, cpu_offline)) 2638 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cpu_offline, NULL, cpu); 2639 else 2640 scx_exit(sch, SCX_EXIT_UNREG_KERN, 2641 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG, 2642 "cpu %d going %s, exiting scheduler", cpu, 2643 online ? 
"online" : "offline"); 2644 } 2645 2646 void scx_rq_activate(struct rq *rq) 2647 { 2648 handle_hotplug(rq, true); 2649 } 2650 2651 void scx_rq_deactivate(struct rq *rq) 2652 { 2653 handle_hotplug(rq, false); 2654 } 2655 2656 static void rq_online_scx(struct rq *rq) 2657 { 2658 rq->scx.flags |= SCX_RQ_ONLINE; 2659 } 2660 2661 static void rq_offline_scx(struct rq *rq) 2662 { 2663 rq->scx.flags &= ~SCX_RQ_ONLINE; 2664 } 2665 2666 2667 static bool check_rq_for_timeouts(struct rq *rq) 2668 { 2669 struct scx_sched *sch; 2670 struct task_struct *p; 2671 struct rq_flags rf; 2672 bool timed_out = false; 2673 2674 rq_lock_irqsave(rq, &rf); 2675 sch = rcu_dereference_bh(scx_root); 2676 if (unlikely(!sch)) 2677 goto out_unlock; 2678 2679 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) { 2680 unsigned long last_runnable = p->scx.runnable_at; 2681 2682 if (unlikely(time_after(jiffies, 2683 last_runnable + scx_watchdog_timeout))) { 2684 u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable); 2685 2686 scx_exit(sch, SCX_EXIT_ERROR_STALL, 0, 2687 "%s[%d] failed to run for %u.%03us", 2688 p->comm, p->pid, dur_ms / 1000, dur_ms % 1000); 2689 timed_out = true; 2690 break; 2691 } 2692 } 2693 out_unlock: 2694 rq_unlock_irqrestore(rq, &rf); 2695 return timed_out; 2696 } 2697 2698 static void scx_watchdog_workfn(struct work_struct *work) 2699 { 2700 int cpu; 2701 2702 WRITE_ONCE(scx_watchdog_timestamp, jiffies); 2703 2704 for_each_online_cpu(cpu) { 2705 if (unlikely(check_rq_for_timeouts(cpu_rq(cpu)))) 2706 break; 2707 2708 cond_resched(); 2709 } 2710 queue_delayed_work(system_unbound_wq, to_delayed_work(work), 2711 scx_watchdog_timeout / 2); 2712 } 2713 2714 void scx_tick(struct rq *rq) 2715 { 2716 struct scx_sched *sch; 2717 unsigned long last_check; 2718 2719 if (!scx_enabled()) 2720 return; 2721 2722 sch = rcu_dereference_bh(scx_root); 2723 if (unlikely(!sch)) 2724 return; 2725 2726 last_check = READ_ONCE(scx_watchdog_timestamp); 2727 if (unlikely(time_after(jiffies, 2728 last_check + READ_ONCE(scx_watchdog_timeout)))) { 2729 u32 dur_ms = jiffies_to_msecs(jiffies - last_check); 2730 2731 scx_exit(sch, SCX_EXIT_ERROR_STALL, 0, 2732 "watchdog failed to check in for %u.%03us", 2733 dur_ms / 1000, dur_ms % 1000); 2734 } 2735 2736 update_other_load_avgs(rq); 2737 } 2738 2739 static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued) 2740 { 2741 struct scx_sched *sch = scx_root; 2742 2743 update_curr_scx(rq); 2744 2745 /* 2746 * While disabling, always resched and refresh core-sched timestamp as 2747 * we can't trust the slice management or ops.core_sched_before(). 2748 */ 2749 if (scx_rq_bypassing(rq)) { 2750 curr->scx.slice = 0; 2751 touch_core_sched(rq, curr); 2752 } else if (SCX_HAS_OP(sch, tick)) { 2753 SCX_CALL_OP_TASK(sch, SCX_KF_REST, tick, rq, curr); 2754 } 2755 2756 if (!curr->scx.slice) 2757 resched_curr(rq); 2758 } 2759 2760 #ifdef CONFIG_EXT_GROUP_SCHED 2761 static struct cgroup *tg_cgrp(struct task_group *tg) 2762 { 2763 /* 2764 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup, 2765 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the 2766 * root cgroup. 
2767 */ 2768 if (tg && tg->css.cgroup) 2769 return tg->css.cgroup; 2770 else 2771 return &cgrp_dfl_root.cgrp; 2772 } 2773 2774 #define SCX_INIT_TASK_ARGS_CGROUP(tg) .cgroup = tg_cgrp(tg), 2775 2776 #else /* CONFIG_EXT_GROUP_SCHED */ 2777 2778 #define SCX_INIT_TASK_ARGS_CGROUP(tg) 2779 2780 #endif /* CONFIG_EXT_GROUP_SCHED */ 2781 2782 static enum scx_task_state scx_get_task_state(const struct task_struct *p) 2783 { 2784 return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT; 2785 } 2786 2787 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state) 2788 { 2789 enum scx_task_state prev_state = scx_get_task_state(p); 2790 bool warn = false; 2791 2792 BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS)); 2793 2794 switch (state) { 2795 case SCX_TASK_NONE: 2796 break; 2797 case SCX_TASK_INIT: 2798 warn = prev_state != SCX_TASK_NONE; 2799 break; 2800 case SCX_TASK_READY: 2801 warn = prev_state == SCX_TASK_NONE; 2802 break; 2803 case SCX_TASK_ENABLED: 2804 warn = prev_state != SCX_TASK_READY; 2805 break; 2806 default: 2807 warn = true; 2808 return; 2809 } 2810 2811 WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]", 2812 prev_state, state, p->comm, p->pid); 2813 2814 p->scx.flags &= ~SCX_TASK_STATE_MASK; 2815 p->scx.flags |= state << SCX_TASK_STATE_SHIFT; 2816 } 2817 2818 static int scx_init_task(struct task_struct *p, struct task_group *tg, bool fork) 2819 { 2820 struct scx_sched *sch = scx_root; 2821 int ret; 2822 2823 p->scx.disallow = false; 2824 2825 if (SCX_HAS_OP(sch, init_task)) { 2826 struct scx_init_task_args args = { 2827 SCX_INIT_TASK_ARGS_CGROUP(tg) 2828 .fork = fork, 2829 }; 2830 2831 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init_task, NULL, 2832 p, &args); 2833 if (unlikely(ret)) { 2834 ret = ops_sanitize_err(sch, "init_task", ret); 2835 return ret; 2836 } 2837 } 2838 2839 scx_set_task_state(p, SCX_TASK_INIT); 2840 2841 if (p->scx.disallow) { 2842 if (!fork) { 2843 struct rq *rq; 2844 struct rq_flags rf; 2845 2846 rq = task_rq_lock(p, &rf); 2847 2848 /* 2849 * We're in the load path and @p->policy will be applied 2850 * right after. Reverting @p->policy here and rejecting 2851 * %SCHED_EXT transitions from scx_check_setscheduler() 2852 * guarantees that if ops.init_task() sets @p->disallow, 2853 * @p can never be in SCX. 2854 */ 2855 if (p->policy == SCHED_EXT) { 2856 p->policy = SCHED_NORMAL; 2857 atomic_long_inc(&scx_nr_rejected); 2858 } 2859 2860 task_rq_unlock(rq, p, &rf); 2861 } else if (p->policy == SCHED_EXT) { 2862 scx_error(sch, "ops.init_task() set task->scx.disallow for %s[%d] during fork", 2863 p->comm, p->pid); 2864 } 2865 } 2866 2867 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT; 2868 return 0; 2869 } 2870 2871 static void scx_enable_task(struct task_struct *p) 2872 { 2873 struct scx_sched *sch = scx_root; 2874 struct rq *rq = task_rq(p); 2875 u32 weight; 2876 2877 lockdep_assert_rq_held(rq); 2878 2879 /* 2880 * Set the weight before calling ops.enable() so that the scheduler 2881 * doesn't see a stale value if they inspect the task struct. 
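 *
 * As a concrete example (assuming the usual kernel priority tables), a
 * nice-0 task has a load weight of 1024, which sched_weight_to_cgroup()
 * maps to the default cgroup weight of 100; that is the value the BPF
 * scheduler will observe in @p->scx.weight.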
2882 */ 2883 if (task_has_idle_policy(p)) 2884 weight = WEIGHT_IDLEPRIO; 2885 else 2886 weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO]; 2887 2888 p->scx.weight = sched_weight_to_cgroup(weight); 2889 2890 if (SCX_HAS_OP(sch, enable)) 2891 SCX_CALL_OP_TASK(sch, SCX_KF_REST, enable, rq, p); 2892 scx_set_task_state(p, SCX_TASK_ENABLED); 2893 2894 if (SCX_HAS_OP(sch, set_weight)) 2895 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq, 2896 p, p->scx.weight); 2897 } 2898 2899 static void scx_disable_task(struct task_struct *p) 2900 { 2901 struct scx_sched *sch = scx_root; 2902 struct rq *rq = task_rq(p); 2903 2904 lockdep_assert_rq_held(rq); 2905 WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED); 2906 2907 if (SCX_HAS_OP(sch, disable)) 2908 SCX_CALL_OP_TASK(sch, SCX_KF_REST, disable, rq, p); 2909 scx_set_task_state(p, SCX_TASK_READY); 2910 } 2911 2912 static void scx_exit_task(struct task_struct *p) 2913 { 2914 struct scx_sched *sch = scx_root; 2915 struct scx_exit_task_args args = { 2916 .cancelled = false, 2917 }; 2918 2919 lockdep_assert_rq_held(task_rq(p)); 2920 2921 switch (scx_get_task_state(p)) { 2922 case SCX_TASK_NONE: 2923 return; 2924 case SCX_TASK_INIT: 2925 args.cancelled = true; 2926 break; 2927 case SCX_TASK_READY: 2928 break; 2929 case SCX_TASK_ENABLED: 2930 scx_disable_task(p); 2931 break; 2932 default: 2933 WARN_ON_ONCE(true); 2934 return; 2935 } 2936 2937 if (SCX_HAS_OP(sch, exit_task)) 2938 SCX_CALL_OP_TASK(sch, SCX_KF_REST, exit_task, task_rq(p), 2939 p, &args); 2940 scx_set_task_state(p, SCX_TASK_NONE); 2941 } 2942 2943 void init_scx_entity(struct sched_ext_entity *scx) 2944 { 2945 memset(scx, 0, sizeof(*scx)); 2946 INIT_LIST_HEAD(&scx->dsq_list.node); 2947 RB_CLEAR_NODE(&scx->dsq_priq); 2948 scx->sticky_cpu = -1; 2949 scx->holding_cpu = -1; 2950 INIT_LIST_HEAD(&scx->runnable_node); 2951 scx->runnable_at = jiffies; 2952 scx->ddsp_dsq_id = SCX_DSQ_INVALID; 2953 scx->slice = READ_ONCE(scx_slice_dfl); 2954 } 2955 2956 void scx_pre_fork(struct task_struct *p) 2957 { 2958 /* 2959 * BPF scheduler enable/disable paths want to be able to iterate and 2960 * update all tasks which can become complex when racing forks. As 2961 * enable/disable are very cold paths, let's use a percpu_rwsem to 2962 * exclude forks. 2963 */ 2964 percpu_down_read(&scx_fork_rwsem); 2965 } 2966 2967 int scx_fork(struct task_struct *p) 2968 { 2969 percpu_rwsem_assert_held(&scx_fork_rwsem); 2970 2971 if (scx_init_task_enabled) 2972 return scx_init_task(p, task_group(p), true); 2973 else 2974 return 0; 2975 } 2976 2977 void scx_post_fork(struct task_struct *p) 2978 { 2979 if (scx_init_task_enabled) { 2980 scx_set_task_state(p, SCX_TASK_READY); 2981 2982 /* 2983 * Enable the task immediately if it's running on sched_ext. 2984 * Otherwise, it'll be enabled in switching_to_scx() if and 2985 * when it's ever configured to run with a SCHED_EXT policy. 
2986 */ 2987 if (p->sched_class == &ext_sched_class) { 2988 struct rq_flags rf; 2989 struct rq *rq; 2990 2991 rq = task_rq_lock(p, &rf); 2992 scx_enable_task(p); 2993 task_rq_unlock(rq, p, &rf); 2994 } 2995 } 2996 2997 raw_spin_lock_irq(&scx_tasks_lock); 2998 list_add_tail(&p->scx.tasks_node, &scx_tasks); 2999 raw_spin_unlock_irq(&scx_tasks_lock); 3000 3001 percpu_up_read(&scx_fork_rwsem); 3002 } 3003 3004 void scx_cancel_fork(struct task_struct *p) 3005 { 3006 if (scx_enabled()) { 3007 struct rq *rq; 3008 struct rq_flags rf; 3009 3010 rq = task_rq_lock(p, &rf); 3011 WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY); 3012 scx_exit_task(p); 3013 task_rq_unlock(rq, p, &rf); 3014 } 3015 3016 percpu_up_read(&scx_fork_rwsem); 3017 } 3018 3019 void sched_ext_dead(struct task_struct *p) 3020 { 3021 unsigned long flags; 3022 3023 raw_spin_lock_irqsave(&scx_tasks_lock, flags); 3024 list_del_init(&p->scx.tasks_node); 3025 raw_spin_unlock_irqrestore(&scx_tasks_lock, flags); 3026 3027 /* 3028 * @p is off scx_tasks and wholly ours. scx_enable()'s READY -> ENABLED 3029 * transitions can't race us. Disable ops for @p. 3030 */ 3031 if (scx_get_task_state(p) != SCX_TASK_NONE) { 3032 struct rq_flags rf; 3033 struct rq *rq; 3034 3035 rq = task_rq_lock(p, &rf); 3036 scx_exit_task(p); 3037 task_rq_unlock(rq, p, &rf); 3038 } 3039 } 3040 3041 static void reweight_task_scx(struct rq *rq, struct task_struct *p, 3042 const struct load_weight *lw) 3043 { 3044 struct scx_sched *sch = scx_root; 3045 3046 lockdep_assert_rq_held(task_rq(p)); 3047 3048 p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight)); 3049 if (SCX_HAS_OP(sch, set_weight)) 3050 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq, 3051 p, p->scx.weight); 3052 } 3053 3054 static void prio_changed_scx(struct rq *rq, struct task_struct *p, u64 oldprio) 3055 { 3056 } 3057 3058 static void switching_to_scx(struct rq *rq, struct task_struct *p) 3059 { 3060 struct scx_sched *sch = scx_root; 3061 3062 scx_enable_task(p); 3063 3064 /* 3065 * set_cpus_allowed_scx() is not called while @p is associated with a 3066 * different scheduler class. Keep the BPF scheduler up-to-date. 3067 */ 3068 if (SCX_HAS_OP(sch, set_cpumask)) 3069 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, rq, 3070 p, (struct cpumask *)p->cpus_ptr); 3071 } 3072 3073 static void switched_from_scx(struct rq *rq, struct task_struct *p) 3074 { 3075 scx_disable_task(p); 3076 } 3077 3078 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p,int wake_flags) {} 3079 static void switched_to_scx(struct rq *rq, struct task_struct *p) {} 3080 3081 int scx_check_setscheduler(struct task_struct *p, int policy) 3082 { 3083 lockdep_assert_rq_held(task_rq(p)); 3084 3085 /* if disallow, reject transitioning into SCX */ 3086 if (scx_enabled() && READ_ONCE(p->scx.disallow) && 3087 p->policy != policy && policy == SCHED_EXT) 3088 return -EACCES; 3089 3090 return 0; 3091 } 3092 3093 #ifdef CONFIG_NO_HZ_FULL 3094 bool scx_can_stop_tick(struct rq *rq) 3095 { 3096 struct task_struct *p = rq->curr; 3097 3098 if (scx_rq_bypassing(rq)) 3099 return false; 3100 3101 if (p->sched_class != &ext_sched_class) 3102 return true; 3103 3104 /* 3105 * @rq can dispatch from different DSQs, so we can't tell whether it 3106 * needs the tick or not by looking at nr_running. Allow stopping ticks 3107 * iff the BPF scheduler indicated so. See set_next_task_scx(). 
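 * (SCX_RQ_CAN_STOP_TICK is set there when the running task's slice is
 * %SCX_SLICE_INF and cleared otherwise.)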
3108 */ 3109 return rq->scx.flags & SCX_RQ_CAN_STOP_TICK; 3110 } 3111 #endif 3112 3113 #ifdef CONFIG_EXT_GROUP_SCHED 3114 3115 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_ops_rwsem); 3116 static bool scx_cgroup_enabled; 3117 3118 void scx_tg_init(struct task_group *tg) 3119 { 3120 tg->scx.weight = CGROUP_WEIGHT_DFL; 3121 tg->scx.bw_period_us = default_bw_period_us(); 3122 tg->scx.bw_quota_us = RUNTIME_INF; 3123 tg->scx.idle = false; 3124 } 3125 3126 int scx_tg_online(struct task_group *tg) 3127 { 3128 struct scx_sched *sch = scx_root; 3129 int ret = 0; 3130 3131 WARN_ON_ONCE(tg->scx.flags & (SCX_TG_ONLINE | SCX_TG_INITED)); 3132 3133 if (scx_cgroup_enabled) { 3134 if (SCX_HAS_OP(sch, cgroup_init)) { 3135 struct scx_cgroup_init_args args = 3136 { .weight = tg->scx.weight, 3137 .bw_period_us = tg->scx.bw_period_us, 3138 .bw_quota_us = tg->scx.bw_quota_us, 3139 .bw_burst_us = tg->scx.bw_burst_us }; 3140 3141 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init, 3142 NULL, tg->css.cgroup, &args); 3143 if (ret) 3144 ret = ops_sanitize_err(sch, "cgroup_init", ret); 3145 } 3146 if (ret == 0) 3147 tg->scx.flags |= SCX_TG_ONLINE | SCX_TG_INITED; 3148 } else { 3149 tg->scx.flags |= SCX_TG_ONLINE; 3150 } 3151 3152 return ret; 3153 } 3154 3155 void scx_tg_offline(struct task_group *tg) 3156 { 3157 struct scx_sched *sch = scx_root; 3158 3159 WARN_ON_ONCE(!(tg->scx.flags & SCX_TG_ONLINE)); 3160 3161 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_exit) && 3162 (tg->scx.flags & SCX_TG_INITED)) 3163 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL, 3164 tg->css.cgroup); 3165 tg->scx.flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED); 3166 } 3167 3168 int scx_cgroup_can_attach(struct cgroup_taskset *tset) 3169 { 3170 struct scx_sched *sch = scx_root; 3171 struct cgroup_subsys_state *css; 3172 struct task_struct *p; 3173 int ret; 3174 3175 if (!scx_cgroup_enabled) 3176 return 0; 3177 3178 cgroup_taskset_for_each(p, css, tset) { 3179 struct cgroup *from = tg_cgrp(task_group(p)); 3180 struct cgroup *to = tg_cgrp(css_tg(css)); 3181 3182 WARN_ON_ONCE(p->scx.cgrp_moving_from); 3183 3184 /* 3185 * sched_move_task() omits identity migrations. Let's match the 3186 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move() 3187 * always match one-to-one. 3188 */ 3189 if (from == to) 3190 continue; 3191 3192 if (SCX_HAS_OP(sch, cgroup_prep_move)) { 3193 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, 3194 cgroup_prep_move, NULL, 3195 p, from, css->cgroup); 3196 if (ret) 3197 goto err; 3198 } 3199 3200 p->scx.cgrp_moving_from = from; 3201 } 3202 3203 return 0; 3204 3205 err: 3206 cgroup_taskset_for_each(p, css, tset) { 3207 if (SCX_HAS_OP(sch, cgroup_cancel_move) && 3208 p->scx.cgrp_moving_from) 3209 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL, 3210 p, p->scx.cgrp_moving_from, css->cgroup); 3211 p->scx.cgrp_moving_from = NULL; 3212 } 3213 3214 return ops_sanitize_err(sch, "cgroup_prep_move", ret); 3215 } 3216 3217 void scx_cgroup_move_task(struct task_struct *p) 3218 { 3219 struct scx_sched *sch = scx_root; 3220 3221 if (!scx_cgroup_enabled) 3222 return; 3223 3224 /* 3225 * @p must have ops.cgroup_prep_move() called on it and thus 3226 * cgrp_moving_from set. 
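 * cgrp_moving_from is set in scx_cgroup_can_attach() and cleared again
 * either here or in scx_cgroup_cancel_attach().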
3227 */ 3228 if (SCX_HAS_OP(sch, cgroup_move) && 3229 !WARN_ON_ONCE(!p->scx.cgrp_moving_from)) 3230 SCX_CALL_OP_TASK(sch, SCX_KF_UNLOCKED, cgroup_move, NULL, 3231 p, p->scx.cgrp_moving_from, 3232 tg_cgrp(task_group(p))); 3233 p->scx.cgrp_moving_from = NULL; 3234 } 3235 3236 void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) 3237 { 3238 struct scx_sched *sch = scx_root; 3239 struct cgroup_subsys_state *css; 3240 struct task_struct *p; 3241 3242 if (!scx_cgroup_enabled) 3243 return; 3244 3245 cgroup_taskset_for_each(p, css, tset) { 3246 if (SCX_HAS_OP(sch, cgroup_cancel_move) && 3247 p->scx.cgrp_moving_from) 3248 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_cancel_move, NULL, 3249 p, p->scx.cgrp_moving_from, css->cgroup); 3250 p->scx.cgrp_moving_from = NULL; 3251 } 3252 } 3253 3254 void scx_group_set_weight(struct task_group *tg, unsigned long weight) 3255 { 3256 struct scx_sched *sch = scx_root; 3257 3258 percpu_down_read(&scx_cgroup_ops_rwsem); 3259 3260 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_weight) && 3261 tg->scx.weight != weight) 3262 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_weight, NULL, 3263 tg_cgrp(tg), weight); 3264 3265 tg->scx.weight = weight; 3266 3267 percpu_up_read(&scx_cgroup_ops_rwsem); 3268 } 3269 3270 void scx_group_set_idle(struct task_group *tg, bool idle) 3271 { 3272 struct scx_sched *sch = scx_root; 3273 3274 percpu_down_read(&scx_cgroup_ops_rwsem); 3275 3276 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_idle)) 3277 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_idle, NULL, 3278 tg_cgrp(tg), idle); 3279 3280 /* Update the task group's idle state */ 3281 tg->scx.idle = idle; 3282 3283 percpu_up_read(&scx_cgroup_ops_rwsem); 3284 } 3285 3286 void scx_group_set_bandwidth(struct task_group *tg, 3287 u64 period_us, u64 quota_us, u64 burst_us) 3288 { 3289 struct scx_sched *sch = scx_root; 3290 3291 percpu_down_read(&scx_cgroup_ops_rwsem); 3292 3293 if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_bandwidth) && 3294 (tg->scx.bw_period_us != period_us || 3295 tg->scx.bw_quota_us != quota_us || 3296 tg->scx.bw_burst_us != burst_us)) 3297 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_bandwidth, NULL, 3298 tg_cgrp(tg), period_us, quota_us, burst_us); 3299 3300 tg->scx.bw_period_us = period_us; 3301 tg->scx.bw_quota_us = quota_us; 3302 tg->scx.bw_burst_us = burst_us; 3303 3304 percpu_up_read(&scx_cgroup_ops_rwsem); 3305 } 3306 3307 static void scx_cgroup_lock(void) 3308 { 3309 percpu_down_write(&scx_cgroup_ops_rwsem); 3310 cgroup_lock(); 3311 } 3312 3313 static void scx_cgroup_unlock(void) 3314 { 3315 cgroup_unlock(); 3316 percpu_up_write(&scx_cgroup_ops_rwsem); 3317 } 3318 3319 #else /* CONFIG_EXT_GROUP_SCHED */ 3320 3321 static void scx_cgroup_lock(void) {} 3322 static void scx_cgroup_unlock(void) {} 3323 3324 #endif /* CONFIG_EXT_GROUP_SCHED */ 3325 3326 /* 3327 * Omitted operations: 3328 * 3329 * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task 3330 * isn't tied to the CPU at that point. Preemption is implemented by resetting 3331 * the victim task's slice to 0 and triggering reschedule on the target CPU. 3332 * 3333 * - migrate_task_rq: Unnecessary as task to cpu mapping is transient. 3334 * 3335 * - task_fork/dead: We need fork/dead notifications for all tasks regardless of 3336 * their current sched_class. Call them directly from sched core instead. 
3337 */ 3338 DEFINE_SCHED_CLASS(ext) = { 3339 .queue_mask = 1, 3340 3341 .enqueue_task = enqueue_task_scx, 3342 .dequeue_task = dequeue_task_scx, 3343 .yield_task = yield_task_scx, 3344 .yield_to_task = yield_to_task_scx, 3345 3346 .wakeup_preempt = wakeup_preempt_scx, 3347 3348 .pick_task = pick_task_scx, 3349 3350 .put_prev_task = put_prev_task_scx, 3351 .set_next_task = set_next_task_scx, 3352 3353 .select_task_rq = select_task_rq_scx, 3354 .task_woken = task_woken_scx, 3355 .set_cpus_allowed = set_cpus_allowed_scx, 3356 3357 .rq_online = rq_online_scx, 3358 .rq_offline = rq_offline_scx, 3359 3360 .task_tick = task_tick_scx, 3361 3362 .switching_to = switching_to_scx, 3363 .switched_from = switched_from_scx, 3364 .switched_to = switched_to_scx, 3365 .reweight_task = reweight_task_scx, 3366 .prio_changed = prio_changed_scx, 3367 3368 .update_curr = update_curr_scx, 3369 3370 #ifdef CONFIG_UCLAMP_TASK 3371 .uclamp_enabled = 1, 3372 #endif 3373 }; 3374 3375 static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id) 3376 { 3377 memset(dsq, 0, sizeof(*dsq)); 3378 3379 raw_spin_lock_init(&dsq->lock); 3380 INIT_LIST_HEAD(&dsq->list); 3381 dsq->id = dsq_id; 3382 } 3383 3384 static void free_dsq_irq_workfn(struct irq_work *irq_work) 3385 { 3386 struct llist_node *to_free = llist_del_all(&dsqs_to_free); 3387 struct scx_dispatch_q *dsq, *tmp_dsq; 3388 3389 llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node) 3390 kfree_rcu(dsq, rcu); 3391 } 3392 3393 static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn); 3394 3395 static void destroy_dsq(struct scx_sched *sch, u64 dsq_id) 3396 { 3397 struct scx_dispatch_q *dsq; 3398 unsigned long flags; 3399 3400 rcu_read_lock(); 3401 3402 dsq = find_user_dsq(sch, dsq_id); 3403 if (!dsq) 3404 goto out_unlock_rcu; 3405 3406 raw_spin_lock_irqsave(&dsq->lock, flags); 3407 3408 if (dsq->nr) { 3409 scx_error(sch, "attempting to destroy in-use dsq 0x%016llx (nr=%u)", 3410 dsq->id, dsq->nr); 3411 goto out_unlock_dsq; 3412 } 3413 3414 if (rhashtable_remove_fast(&sch->dsq_hash, &dsq->hash_node, 3415 dsq_hash_params)) 3416 goto out_unlock_dsq; 3417 3418 /* 3419 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from 3420 * queueing more tasks. As this function can be called from anywhere, 3421 * freeing is bounced through an irq work to avoid nesting RCU 3422 * operations inside scheduler locks. 3423 */ 3424 dsq->id = SCX_DSQ_INVALID; 3425 llist_add(&dsq->free_node, &dsqs_to_free); 3426 irq_work_queue(&free_dsq_irq_work); 3427 3428 out_unlock_dsq: 3429 raw_spin_unlock_irqrestore(&dsq->lock, flags); 3430 out_unlock_rcu: 3431 rcu_read_unlock(); 3432 } 3433 3434 #ifdef CONFIG_EXT_GROUP_SCHED 3435 static void scx_cgroup_exit(struct scx_sched *sch) 3436 { 3437 struct cgroup_subsys_state *css; 3438 3439 scx_cgroup_enabled = false; 3440 3441 /* 3442 * scx_tg_on/offline() are excluded through cgroup_lock(). If we walk 3443 * cgroups and exit all the inited ones, all online cgroups are exited. 3444 */ 3445 css_for_each_descendant_post(css, &root_task_group.css) { 3446 struct task_group *tg = css_tg(css); 3447 3448 if (!(tg->scx.flags & SCX_TG_INITED)) 3449 continue; 3450 tg->scx.flags &= ~SCX_TG_INITED; 3451 3452 if (!sch->ops.cgroup_exit) 3453 continue; 3454 3455 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_exit, NULL, 3456 css->cgroup); 3457 } 3458 } 3459 3460 static int scx_cgroup_init(struct scx_sched *sch) 3461 { 3462 struct cgroup_subsys_state *css; 3463 int ret; 3464 3465 /* 3466 * scx_tg_on/offline() are excluded through cgroup_lock(). 
If we walk 3467 * cgroups and init, all online cgroups are initialized. 3468 */ 3469 css_for_each_descendant_pre(css, &root_task_group.css) { 3470 struct task_group *tg = css_tg(css); 3471 struct scx_cgroup_init_args args = { 3472 .weight = tg->scx.weight, 3473 .bw_period_us = tg->scx.bw_period_us, 3474 .bw_quota_us = tg->scx.bw_quota_us, 3475 .bw_burst_us = tg->scx.bw_burst_us, 3476 }; 3477 3478 if ((tg->scx.flags & 3479 (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE) 3480 continue; 3481 3482 if (!sch->ops.cgroup_init) { 3483 tg->scx.flags |= SCX_TG_INITED; 3484 continue; 3485 } 3486 3487 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init, NULL, 3488 css->cgroup, &args); 3489 if (ret) { 3490 css_put(css); 3491 scx_error(sch, "ops.cgroup_init() failed (%d)", ret); 3492 return ret; 3493 } 3494 tg->scx.flags |= SCX_TG_INITED; 3495 } 3496 3497 WARN_ON_ONCE(scx_cgroup_enabled); 3498 scx_cgroup_enabled = true; 3499 3500 return 0; 3501 } 3502 3503 #else 3504 static void scx_cgroup_exit(struct scx_sched *sch) {} 3505 static int scx_cgroup_init(struct scx_sched *sch) { return 0; } 3506 #endif 3507 3508 3509 /******************************************************************************** 3510 * Sysfs interface and ops enable/disable. 3511 */ 3512 3513 #define SCX_ATTR(_name) \ 3514 static struct kobj_attribute scx_attr_##_name = { \ 3515 .attr = { .name = __stringify(_name), .mode = 0444 }, \ 3516 .show = scx_attr_##_name##_show, \ 3517 } 3518 3519 static ssize_t scx_attr_state_show(struct kobject *kobj, 3520 struct kobj_attribute *ka, char *buf) 3521 { 3522 return sysfs_emit(buf, "%s\n", scx_enable_state_str[scx_enable_state()]); 3523 } 3524 SCX_ATTR(state); 3525 3526 static ssize_t scx_attr_switch_all_show(struct kobject *kobj, 3527 struct kobj_attribute *ka, char *buf) 3528 { 3529 return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all)); 3530 } 3531 SCX_ATTR(switch_all); 3532 3533 static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj, 3534 struct kobj_attribute *ka, char *buf) 3535 { 3536 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected)); 3537 } 3538 SCX_ATTR(nr_rejected); 3539 3540 static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj, 3541 struct kobj_attribute *ka, char *buf) 3542 { 3543 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq)); 3544 } 3545 SCX_ATTR(hotplug_seq); 3546 3547 static ssize_t scx_attr_enable_seq_show(struct kobject *kobj, 3548 struct kobj_attribute *ka, char *buf) 3549 { 3550 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq)); 3551 } 3552 SCX_ATTR(enable_seq); 3553 3554 static struct attribute *scx_global_attrs[] = { 3555 &scx_attr_state.attr, 3556 &scx_attr_switch_all.attr, 3557 &scx_attr_nr_rejected.attr, 3558 &scx_attr_hotplug_seq.attr, 3559 &scx_attr_enable_seq.attr, 3560 NULL, 3561 }; 3562 3563 static const struct attribute_group scx_global_attr_group = { 3564 .attrs = scx_global_attrs, 3565 }; 3566 3567 static void free_exit_info(struct scx_exit_info *ei); 3568 3569 static void scx_sched_free_rcu_work(struct work_struct *work) 3570 { 3571 struct rcu_work *rcu_work = to_rcu_work(work); 3572 struct scx_sched *sch = container_of(rcu_work, struct scx_sched, rcu_work); 3573 struct rhashtable_iter rht_iter; 3574 struct scx_dispatch_q *dsq; 3575 int node; 3576 3577 irq_work_sync(&sch->error_irq_work); 3578 kthread_stop(sch->helper->task); 3579 3580 free_percpu(sch->pcpu); 3581 3582 for_each_node_state(node, N_POSSIBLE) 3583 kfree(sch->global_dsqs[node]); 3584 kfree(sch->global_dsqs); 3585 3586 
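	/*
	 * Destroy any user DSQs still in the hash table before tearing the
	 * table itself down. -EAGAIN from the walker indicates the table was
	 * resized mid-walk; restart the walk in that case.
	 */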
rhashtable_walk_enter(&sch->dsq_hash, &rht_iter); 3587 do { 3588 rhashtable_walk_start(&rht_iter); 3589 3590 while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq)) 3591 destroy_dsq(sch, dsq->id); 3592 3593 rhashtable_walk_stop(&rht_iter); 3594 } while (dsq == ERR_PTR(-EAGAIN)); 3595 rhashtable_walk_exit(&rht_iter); 3596 3597 rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL); 3598 free_exit_info(sch->exit_info); 3599 kfree(sch); 3600 } 3601 3602 static void scx_kobj_release(struct kobject *kobj) 3603 { 3604 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj); 3605 3606 INIT_RCU_WORK(&sch->rcu_work, scx_sched_free_rcu_work); 3607 queue_rcu_work(system_unbound_wq, &sch->rcu_work); 3608 } 3609 3610 static ssize_t scx_attr_ops_show(struct kobject *kobj, 3611 struct kobj_attribute *ka, char *buf) 3612 { 3613 return sysfs_emit(buf, "%s\n", scx_root->ops.name); 3614 } 3615 SCX_ATTR(ops); 3616 3617 #define scx_attr_event_show(buf, at, events, kind) ({ \ 3618 sysfs_emit_at(buf, at, "%s %llu\n", #kind, (events)->kind); \ 3619 }) 3620 3621 static ssize_t scx_attr_events_show(struct kobject *kobj, 3622 struct kobj_attribute *ka, char *buf) 3623 { 3624 struct scx_sched *sch = container_of(kobj, struct scx_sched, kobj); 3625 struct scx_event_stats events; 3626 int at = 0; 3627 3628 scx_read_events(sch, &events); 3629 at += scx_attr_event_show(buf, at, &events, SCX_EV_SELECT_CPU_FALLBACK); 3630 at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE); 3631 at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_KEEP_LAST); 3632 at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_EXITING); 3633 at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED); 3634 at += scx_attr_event_show(buf, at, &events, SCX_EV_REFILL_SLICE_DFL); 3635 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DURATION); 3636 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DISPATCH); 3637 at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_ACTIVATE); 3638 return at; 3639 } 3640 SCX_ATTR(events); 3641 3642 static struct attribute *scx_sched_attrs[] = { 3643 &scx_attr_ops.attr, 3644 &scx_attr_events.attr, 3645 NULL, 3646 }; 3647 ATTRIBUTE_GROUPS(scx_sched); 3648 3649 static const struct kobj_type scx_ktype = { 3650 .release = scx_kobj_release, 3651 .sysfs_ops = &kobj_sysfs_ops, 3652 .default_groups = scx_sched_groups, 3653 }; 3654 3655 static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) 3656 { 3657 return add_uevent_var(env, "SCXOPS=%s", scx_root->ops.name); 3658 } 3659 3660 static const struct kset_uevent_ops scx_uevent_ops = { 3661 .uevent = scx_uevent, 3662 }; 3663 3664 /* 3665 * Used by sched_fork() and __setscheduler_prio() to pick the matching 3666 * sched_class. dl/rt are already handled. 
3667 */
3668 bool task_should_scx(int policy)
3669 {
3670 if (!scx_enabled() || unlikely(scx_enable_state() == SCX_DISABLING))
3671 return false;
3672 if (READ_ONCE(scx_switching_all))
3673 return true;
3674 return policy == SCHED_EXT;
3675 }
3676
3677 bool scx_allow_ttwu_queue(const struct task_struct *p)
3678 {
3679 struct scx_sched *sch;
3680
3681 if (!scx_enabled())
3682 return true;
3683
3684 sch = rcu_dereference_sched(scx_root);
3685 if (unlikely(!sch))
3686 return true;
3687
3688 if (sch->ops.flags & SCX_OPS_ALLOW_QUEUED_WAKEUP)
3689 return true;
3690
3691 if (unlikely(p->sched_class != &ext_sched_class))
3692 return true;
3693
3694 return false;
3695 }
3696
3697 /**
3698 * handle_lockup - sched_ext common lockup handler
3699 * @fmt: format string
3700 *
3701 * Called on a system stall or lockup condition and initiates abort of sched_ext
3702 * if enabled, which may resolve the reported lockup.
3703 *
3704 * Returns %true if sched_ext is enabled and abort was initiated, which may
3705 * resolve the lockup. %false if sched_ext is not enabled or abort was already
3706 * initiated by someone else.
3707 */
3708 static __printf(1, 2) bool handle_lockup(const char *fmt, ...)
3709 {
3710 struct scx_sched *sch;
3711 va_list args;
3712 bool ret;
3713
3714 guard(rcu)();
3715
3716 sch = rcu_dereference(scx_root);
3717 if (unlikely(!sch))
3718 return false;
3719
3720 switch (scx_enable_state()) {
3721 case SCX_ENABLING:
3722 case SCX_ENABLED:
3723 va_start(args, fmt);
3724 ret = scx_verror(sch, fmt, args);
3725 va_end(args);
3726 return ret;
3727 default:
3728 return false;
3729 }
3730 }
3731
3732 /**
3733 * scx_rcu_cpu_stall - sched_ext RCU CPU stall handler
3734 *
3735 * While there are various reasons why RCU CPU stalls can occur on a system
3736 * that may not be caused by the current BPF scheduler, try kicking out the
3737 * current scheduler in an attempt to recover the system to a good state before
3738 * issuing panics.
3739 *
3740 * Returns %true if sched_ext is enabled and abort was initiated, which may
3741 * resolve the reported RCU stall. %false if sched_ext is not enabled or someone
3742 * else already initiated abort.
3743 */
3744 bool scx_rcu_cpu_stall(void)
3745 {
3746 return handle_lockup("RCU CPU stall detected!");
3747 }
3748
3749 /**
3750 * scx_softlockup - sched_ext softlockup handler
3751 * @dur_s: number of seconds the CPU has been stuck due to soft lockup
3752 *
3753 * On some multi-socket setups (e.g. 2x Intel 8480c), the BPF scheduler can
3754 * live-lock the system by making many CPUs target the same DSQ to the point
3755 * where soft-lockup detection triggers. This function is called from the
3756 * soft-lockup watchdog when the triggering point is close and tries to unjam
3757 * the system by aborting the BPF scheduler.
3758 */
3759 void scx_softlockup(u32 dur_s)
3760 {
3761 if (!handle_lockup("soft lockup - CPU %d stuck for %us", smp_processor_id(), dur_s))
3762 return;
3763
3764 printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU %d stuck for %us, disabling BPF scheduler\n",
3765 smp_processor_id(), dur_s);
3766 }
3767
3768 /**
3769 * scx_hardlockup - sched_ext hardlockup handler
3770 *
3771 * A poorly behaving BPF scheduler can trigger a hard lockup by e.g. putting
3772 * numerous affinitized tasks in a single queue and directing all CPUs at it.
3773 * Try kicking out the current scheduler in an attempt to recover the system to
3774 * a good state before taking more drastic actions.
3775 *
3776 * Returns %true if sched_ext is enabled and abort was initiated, which may
3777 * resolve the reported hard lockup. %false if sched_ext is not enabled or
3778 * someone else already initiated abort.
3779 */
3780 bool scx_hardlockup(int cpu)
3781 {
3782 if (!handle_lockup("hard lockup - CPU %d", cpu))
3783 return false;
3784
3785 printk_deferred(KERN_ERR "sched_ext: Hard lockup - CPU %d, disabling BPF scheduler\n",
3786 cpu);
3787 return true;
3788 }
3789
3790 static u32 bypass_lb_cpu(struct scx_sched *sch, struct rq *rq,
3791 struct cpumask *donee_mask, struct cpumask *resched_mask,
3792 u32 nr_donor_target, u32 nr_donee_target)
3793 {
3794 struct scx_dispatch_q *donor_dsq = &rq->scx.bypass_dsq;
3795 struct task_struct *p, *n;
3796 struct scx_dsq_list_node cursor = INIT_DSQ_LIST_CURSOR(cursor, 0, 0);
3797 s32 delta = READ_ONCE(donor_dsq->nr) - nr_donor_target;
3798 u32 nr_balanced = 0, min_delta_us;
3799
3800 /*
3801 * All we want to guarantee is reasonable forward progress. No reason to
3802 * fine tune. Assuming every task on @donor_dsq runs its full slice,
3803 * consider offloading iff the total queued duration is over the
3804 * threshold.
3805 */
3806 min_delta_us = scx_bypass_lb_intv_us / SCX_BYPASS_LB_MIN_DELTA_DIV;
3807 if (delta < DIV_ROUND_UP(min_delta_us, scx_slice_bypass_us))
3808 return 0;
3809
3810 raw_spin_rq_lock_irq(rq);
3811 raw_spin_lock(&donor_dsq->lock);
3812 list_add(&cursor.node, &donor_dsq->list);
3813 resume:
3814 n = container_of(&cursor, struct task_struct, scx.dsq_list);
3815 n = nldsq_next_task(donor_dsq, n, false);
3816
3817 while ((p = n)) {
3818 struct rq *donee_rq;
3819 struct scx_dispatch_q *donee_dsq;
3820 int donee;
3821
3822 n = nldsq_next_task(donor_dsq, n, false);
3823
3824 if (donor_dsq->nr <= nr_donor_target)
3825 break;
3826
3827 if (cpumask_empty(donee_mask))
3828 break;
3829
3830 donee = cpumask_any_and_distribute(donee_mask, p->cpus_ptr);
3831 if (donee >= nr_cpu_ids)
3832 continue;
3833
3834 donee_rq = cpu_rq(donee);
3835 donee_dsq = &donee_rq->scx.bypass_dsq;
3836
3837 /*
3838 * $p's rq is not locked but $p's DSQ lock protects its
3839 * scheduling properties making this test safe.
3840 */
3841 if (!task_can_run_on_remote_rq(sch, p, donee_rq, false))
3842 continue;
3843
3844 /*
3845 * Moving $p from one non-local DSQ to another. The source rq
3846 * and DSQ are already locked. Do an abbreviated dequeue and
3847 * then perform enqueue without unlocking $donor_dsq.
3848 *
3849 * We don't want to drop and reacquire the lock on each
3850 * iteration as @donor_dsq can be very long and potentially
3851 * highly contended. Donee DSQs are less likely to be contended.
3852 * The nested locking is safe as only this LB moves tasks
3853 * between bypass DSQs.
3854 */
3855 dispatch_dequeue_locked(p, donor_dsq);
3856 dispatch_enqueue(sch, donee_dsq, p, SCX_ENQ_NESTED);
3857
3858 /*
3859 * $donee might have been idle and need to be woken up. No need
3860 * to be clever. Kick every CPU that receives tasks.
3861 */ 3862 cpumask_set_cpu(donee, resched_mask); 3863 3864 if (READ_ONCE(donee_dsq->nr) >= nr_donee_target) 3865 cpumask_clear_cpu(donee, donee_mask); 3866 3867 nr_balanced++; 3868 if (!(nr_balanced % SCX_BYPASS_LB_BATCH) && n) { 3869 list_move_tail(&cursor.node, &n->scx.dsq_list.node); 3870 raw_spin_unlock(&donor_dsq->lock); 3871 raw_spin_rq_unlock_irq(rq); 3872 cpu_relax(); 3873 raw_spin_rq_lock_irq(rq); 3874 raw_spin_lock(&donor_dsq->lock); 3875 goto resume; 3876 } 3877 } 3878 3879 list_del_init(&cursor.node); 3880 raw_spin_unlock(&donor_dsq->lock); 3881 raw_spin_rq_unlock_irq(rq); 3882 3883 return nr_balanced; 3884 } 3885 3886 static void bypass_lb_node(struct scx_sched *sch, int node) 3887 { 3888 const struct cpumask *node_mask = cpumask_of_node(node); 3889 struct cpumask *donee_mask = scx_bypass_lb_donee_cpumask; 3890 struct cpumask *resched_mask = scx_bypass_lb_resched_cpumask; 3891 u32 nr_tasks = 0, nr_cpus = 0, nr_balanced = 0; 3892 u32 nr_target, nr_donor_target; 3893 u32 before_min = U32_MAX, before_max = 0; 3894 u32 after_min = U32_MAX, after_max = 0; 3895 int cpu; 3896 3897 /* count the target tasks and CPUs */ 3898 for_each_cpu_and(cpu, cpu_online_mask, node_mask) { 3899 u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr); 3900 3901 nr_tasks += nr; 3902 nr_cpus++; 3903 3904 before_min = min(nr, before_min); 3905 before_max = max(nr, before_max); 3906 } 3907 3908 if (!nr_cpus) 3909 return; 3910 3911 /* 3912 * We don't want CPUs to have more than $nr_donor_target tasks and 3913 * balancing to fill donee CPUs upto $nr_target. Once targets are 3914 * calculated, find the donee CPUs. 3915 */ 3916 nr_target = DIV_ROUND_UP(nr_tasks, nr_cpus); 3917 nr_donor_target = DIV_ROUND_UP(nr_target * SCX_BYPASS_LB_DONOR_PCT, 100); 3918 3919 cpumask_clear(donee_mask); 3920 for_each_cpu_and(cpu, cpu_online_mask, node_mask) { 3921 if (READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr) < nr_target) 3922 cpumask_set_cpu(cpu, donee_mask); 3923 } 3924 3925 /* iterate !donee CPUs and see if they should be offloaded */ 3926 cpumask_clear(resched_mask); 3927 for_each_cpu_and(cpu, cpu_online_mask, node_mask) { 3928 struct rq *rq = cpu_rq(cpu); 3929 struct scx_dispatch_q *donor_dsq = &rq->scx.bypass_dsq; 3930 3931 if (cpumask_empty(donee_mask)) 3932 break; 3933 if (cpumask_test_cpu(cpu, donee_mask)) 3934 continue; 3935 if (READ_ONCE(donor_dsq->nr) <= nr_donor_target) 3936 continue; 3937 3938 nr_balanced += bypass_lb_cpu(sch, rq, donee_mask, resched_mask, 3939 nr_donor_target, nr_target); 3940 } 3941 3942 for_each_cpu(cpu, resched_mask) { 3943 struct rq *rq = cpu_rq(cpu); 3944 3945 raw_spin_rq_lock_irq(rq); 3946 resched_curr(rq); 3947 raw_spin_rq_unlock_irq(rq); 3948 } 3949 3950 for_each_cpu_and(cpu, cpu_online_mask, node_mask) { 3951 u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr); 3952 3953 after_min = min(nr, after_min); 3954 after_max = max(nr, after_max); 3955 3956 } 3957 3958 trace_sched_ext_bypass_lb(node, nr_cpus, nr_tasks, nr_balanced, 3959 before_min, before_max, after_min, after_max); 3960 } 3961 3962 /* 3963 * In bypass mode, all tasks are put on the per-CPU bypass DSQs. If the machine 3964 * is over-saturated and the BPF scheduler skewed tasks into few CPUs, some 3965 * bypass DSQs can be overloaded. If there are enough tasks to saturate other 3966 * lightly loaded CPUs, such imbalance can lead to very high execution latency 3967 * on the overloaded CPUs and thus to hung tasks and RCU stalls. 
To avoid such 3968 * outcomes, a simple load balancing mechanism is implemented by the following 3969 * timer which runs periodically while bypass mode is in effect. 3970 */ 3971 static void scx_bypass_lb_timerfn(struct timer_list *timer) 3972 { 3973 struct scx_sched *sch; 3974 int node; 3975 u32 intv_us; 3976 3977 sch = rcu_dereference_all(scx_root); 3978 if (unlikely(!sch) || !READ_ONCE(scx_bypass_depth)) 3979 return; 3980 3981 for_each_node_with_cpus(node) 3982 bypass_lb_node(sch, node); 3983 3984 intv_us = READ_ONCE(scx_bypass_lb_intv_us); 3985 if (intv_us) 3986 mod_timer(timer, jiffies + usecs_to_jiffies(intv_us)); 3987 } 3988 3989 static DEFINE_TIMER(scx_bypass_lb_timer, scx_bypass_lb_timerfn); 3990 3991 /** 3992 * scx_bypass - [Un]bypass scx_ops and guarantee forward progress 3993 * @bypass: true for bypass, false for unbypass 3994 * 3995 * Bypassing guarantees that all runnable tasks make forward progress without 3996 * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might 3997 * be held by tasks that the BPF scheduler is forgetting to run, which 3998 * unfortunately also excludes toggling the static branches. 3999 * 4000 * Let's work around by overriding a couple ops and modifying behaviors based on 4001 * the DISABLING state and then cycling the queued tasks through dequeue/enqueue 4002 * to force global FIFO scheduling. 4003 * 4004 * - ops.select_cpu() is ignored and the default select_cpu() is used. 4005 * 4006 * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order. 4007 * %SCX_OPS_ENQ_LAST is also ignored. 4008 * 4009 * - ops.dispatch() is ignored. 4010 * 4011 * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice 4012 * can't be trusted. Whenever a tick triggers, the running task is rotated to 4013 * the tail of the queue with core_sched_at touched. 4014 * 4015 * - pick_next_task() suppresses zero slice warning. 4016 * 4017 * - scx_kick_cpu() is disabled to avoid irq_work malfunction during PM 4018 * operations. 4019 * 4020 * - scx_prio_less() reverts to the default core_sched_at order. 4021 */ 4022 static void scx_bypass(bool bypass) 4023 { 4024 static DEFINE_RAW_SPINLOCK(bypass_lock); 4025 static unsigned long bypass_timestamp; 4026 struct scx_sched *sch; 4027 unsigned long flags; 4028 int cpu; 4029 4030 raw_spin_lock_irqsave(&bypass_lock, flags); 4031 sch = rcu_dereference_bh(scx_root); 4032 4033 if (bypass) { 4034 u32 intv_us; 4035 4036 WRITE_ONCE(scx_bypass_depth, scx_bypass_depth + 1); 4037 WARN_ON_ONCE(scx_bypass_depth <= 0); 4038 if (scx_bypass_depth != 1) 4039 goto unlock; 4040 WRITE_ONCE(scx_slice_dfl, scx_slice_bypass_us * NSEC_PER_USEC); 4041 bypass_timestamp = ktime_get_ns(); 4042 if (sch) 4043 scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1); 4044 4045 intv_us = READ_ONCE(scx_bypass_lb_intv_us); 4046 if (intv_us && !timer_pending(&scx_bypass_lb_timer)) { 4047 scx_bypass_lb_timer.expires = 4048 jiffies + usecs_to_jiffies(intv_us); 4049 add_timer_global(&scx_bypass_lb_timer); 4050 } 4051 } else { 4052 WRITE_ONCE(scx_bypass_depth, scx_bypass_depth - 1); 4053 WARN_ON_ONCE(scx_bypass_depth < 0); 4054 if (scx_bypass_depth != 0) 4055 goto unlock; 4056 WRITE_ONCE(scx_slice_dfl, SCX_SLICE_DFL); 4057 if (sch) 4058 scx_add_event(sch, SCX_EV_BYPASS_DURATION, 4059 ktime_get_ns() - bypass_timestamp); 4060 } 4061 4062 /* 4063 * No task property is changing. We just need to make sure all currently 4064 * queued tasks are re-queued according to the new scx_rq_bypassing() 4065 * state. 
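Tasks which aren't currently runnable don't need the cycling; they'll pick up the new state when they're next enqueued.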
As an optimization, walk each rq's runnable_list instead of 4066 * the scx_tasks list. 4067 * 4068 * This function can't trust the scheduler and thus can't use 4069 * cpus_read_lock(). Walk all possible CPUs instead of online. 4070 */ 4071 for_each_possible_cpu(cpu) { 4072 struct rq *rq = cpu_rq(cpu); 4073 struct task_struct *p, *n; 4074 4075 raw_spin_rq_lock(rq); 4076 4077 if (bypass) { 4078 WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING); 4079 rq->scx.flags |= SCX_RQ_BYPASSING; 4080 } else { 4081 WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING)); 4082 rq->scx.flags &= ~SCX_RQ_BYPASSING; 4083 } 4084 4085 /* 4086 * We need to guarantee that no tasks are on the BPF scheduler 4087 * while bypassing. Either we see enabled or the enable path 4088 * sees scx_rq_bypassing() before moving tasks to SCX. 4089 */ 4090 if (!scx_enabled()) { 4091 raw_spin_rq_unlock(rq); 4092 continue; 4093 } 4094 4095 /* 4096 * The use of list_for_each_entry_safe_reverse() is required 4097 * because each task is going to be removed from and added back 4098 * to the runnable_list during iteration. Because they're added 4099 * to the tail of the list, safe reverse iteration can still 4100 * visit all nodes. 4101 */ 4102 list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list, 4103 scx.runnable_node) { 4104 /* cycling deq/enq is enough, see the function comment */ 4105 scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_MOVE) { 4106 /* nothing */ ; 4107 } 4108 } 4109 4110 /* resched to restore ticks and idle state */ 4111 if (cpu_online(cpu) || cpu == smp_processor_id()) 4112 resched_curr(rq); 4113 4114 raw_spin_rq_unlock(rq); 4115 } 4116 4117 unlock: 4118 raw_spin_unlock_irqrestore(&bypass_lock, flags); 4119 } 4120 4121 static void free_exit_info(struct scx_exit_info *ei) 4122 { 4123 kvfree(ei->dump); 4124 kfree(ei->msg); 4125 kfree(ei->bt); 4126 kfree(ei); 4127 } 4128 4129 static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len) 4130 { 4131 struct scx_exit_info *ei; 4132 4133 ei = kzalloc(sizeof(*ei), GFP_KERNEL); 4134 if (!ei) 4135 return NULL; 4136 4137 ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL); 4138 ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL); 4139 ei->dump = kvzalloc(exit_dump_len, GFP_KERNEL); 4140 4141 if (!ei->bt || !ei->msg || !ei->dump) { 4142 free_exit_info(ei); 4143 return NULL; 4144 } 4145 4146 return ei; 4147 } 4148 4149 static const char *scx_exit_reason(enum scx_exit_kind kind) 4150 { 4151 switch (kind) { 4152 case SCX_EXIT_UNREG: 4153 return "unregistered from user space"; 4154 case SCX_EXIT_UNREG_BPF: 4155 return "unregistered from BPF"; 4156 case SCX_EXIT_UNREG_KERN: 4157 return "unregistered from the main kernel"; 4158 case SCX_EXIT_SYSRQ: 4159 return "disabled by sysrq-S"; 4160 case SCX_EXIT_ERROR: 4161 return "runtime error"; 4162 case SCX_EXIT_ERROR_BPF: 4163 return "scx_bpf_error"; 4164 case SCX_EXIT_ERROR_STALL: 4165 return "runnable task stall"; 4166 default: 4167 return "<UNKNOWN>"; 4168 } 4169 } 4170 4171 static void free_kick_syncs(void) 4172 { 4173 int cpu; 4174 4175 for_each_possible_cpu(cpu) { 4176 struct scx_kick_syncs **ksyncs = per_cpu_ptr(&scx_kick_syncs, cpu); 4177 struct scx_kick_syncs *to_free; 4178 4179 to_free = rcu_replace_pointer(*ksyncs, NULL, true); 4180 if (to_free) 4181 kvfree_rcu(to_free, rcu); 4182 } 4183 } 4184 4185 static void scx_disable_workfn(struct kthread_work *work) 4186 { 4187 struct scx_sched *sch = container_of(work, struct scx_sched, disable_work); 4188 struct scx_exit_info *ei = sch->exit_info; 4189 struct 
scx_task_iter sti; 4190 struct task_struct *p; 4191 int kind, cpu; 4192 4193 kind = atomic_read(&sch->exit_kind); 4194 while (true) { 4195 if (kind == SCX_EXIT_DONE) /* already disabled? */ 4196 return; 4197 WARN_ON_ONCE(kind == SCX_EXIT_NONE); 4198 if (atomic_try_cmpxchg(&sch->exit_kind, &kind, SCX_EXIT_DONE)) 4199 break; 4200 } 4201 ei->kind = kind; 4202 ei->reason = scx_exit_reason(ei->kind); 4203 4204 /* guarantee forward progress by bypassing scx_ops */ 4205 scx_bypass(true); 4206 WRITE_ONCE(scx_aborting, false); 4207 4208 switch (scx_set_enable_state(SCX_DISABLING)) { 4209 case SCX_DISABLING: 4210 WARN_ONCE(true, "sched_ext: duplicate disabling instance?"); 4211 break; 4212 case SCX_DISABLED: 4213 pr_warn("sched_ext: ops error detected without ops (%s)\n", 4214 sch->exit_info->msg); 4215 WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING); 4216 goto done; 4217 default: 4218 break; 4219 } 4220 4221 /* 4222 * Here, every runnable task is guaranteed to make forward progress and 4223 * we can safely use blocking synchronization constructs. Actually 4224 * disable ops. 4225 */ 4226 mutex_lock(&scx_enable_mutex); 4227 4228 static_branch_disable(&__scx_switched_all); 4229 WRITE_ONCE(scx_switching_all, false); 4230 4231 /* 4232 * Shut down cgroup support before tasks so that the cgroup attach path 4233 * doesn't race against scx_exit_task(). 4234 */ 4235 scx_cgroup_lock(); 4236 scx_cgroup_exit(sch); 4237 scx_cgroup_unlock(); 4238 4239 /* 4240 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones 4241 * must be switched out and exited synchronously. 4242 */ 4243 percpu_down_write(&scx_fork_rwsem); 4244 4245 scx_init_task_enabled = false; 4246 4247 scx_task_iter_start(&sti); 4248 while ((p = scx_task_iter_next_locked(&sti))) { 4249 unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 4250 const struct sched_class *old_class = p->sched_class; 4251 const struct sched_class *new_class = scx_setscheduler_class(p); 4252 4253 update_rq_clock(task_rq(p)); 4254 4255 if (old_class != new_class) 4256 queue_flags |= DEQUEUE_CLASS; 4257 4258 scoped_guard (sched_change, p, queue_flags) { 4259 p->sched_class = new_class; 4260 } 4261 4262 scx_exit_task(p); 4263 } 4264 scx_task_iter_stop(&sti); 4265 percpu_up_write(&scx_fork_rwsem); 4266 4267 /* 4268 * Invalidate all the rq clocks to prevent getting outdated 4269 * rq clocks from a previous scx scheduler. 4270 */ 4271 for_each_possible_cpu(cpu) { 4272 struct rq *rq = cpu_rq(cpu); 4273 scx_rq_clock_invalidate(rq); 4274 } 4275 4276 /* no task is on scx, turn off all the switches and flush in-progress calls */ 4277 static_branch_disable(&__scx_enabled); 4278 bitmap_zero(sch->has_op, SCX_OPI_END); 4279 scx_idle_disable(); 4280 synchronize_rcu(); 4281 4282 if (ei->kind >= SCX_EXIT_ERROR) { 4283 pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n", 4284 sch->ops.name, ei->reason); 4285 4286 if (ei->msg[0] != '\0') 4287 pr_err("sched_ext: %s: %s\n", sch->ops.name, ei->msg); 4288 #ifdef CONFIG_STACKTRACE 4289 stack_trace_print(ei->bt, ei->bt_len, 2); 4290 #endif 4291 } else { 4292 pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n", 4293 sch->ops.name, ei->reason); 4294 } 4295 4296 if (sch->ops.exit) 4297 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, exit, NULL, ei); 4298 4299 cancel_delayed_work_sync(&scx_watchdog_work); 4300 4301 /* 4302 * scx_root clearing must be inside cpus_read_lock(). See 4303 * handle_hotplug(). 
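 * (handle_hotplug() runs from the CPU hotplug callbacks with the hotplug lock
 * held exclusively, so clearing under cpus_read_lock() should prevent it from
 * observing a half torn-down scheduler.)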
4304 */ 4305 cpus_read_lock(); 4306 RCU_INIT_POINTER(scx_root, NULL); 4307 cpus_read_unlock(); 4308 4309 /* 4310 * Delete the kobject from the hierarchy synchronously. Otherwise, sysfs 4311 * could observe an object of the same name still in the hierarchy when 4312 * the next scheduler is loaded. 4313 */ 4314 kobject_del(&sch->kobj); 4315 4316 free_percpu(scx_dsp_ctx); 4317 scx_dsp_ctx = NULL; 4318 scx_dsp_max_batch = 0; 4319 free_kick_syncs(); 4320 4321 mutex_unlock(&scx_enable_mutex); 4322 4323 WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING); 4324 done: 4325 scx_bypass(false); 4326 } 4327 4328 static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind) 4329 { 4330 int none = SCX_EXIT_NONE; 4331 4332 if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind)) 4333 return false; 4334 4335 /* 4336 * Some CPUs may be trapped in the dispatch paths. Set the aborting 4337 * flag to break potential live-lock scenarios, ensuring we can 4338 * successfully reach scx_bypass(). 4339 */ 4340 WRITE_ONCE(scx_aborting, true); 4341 return true; 4342 } 4343 4344 static void scx_disable(enum scx_exit_kind kind) 4345 { 4346 struct scx_sched *sch; 4347 4348 if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE)) 4349 kind = SCX_EXIT_ERROR; 4350 4351 rcu_read_lock(); 4352 sch = rcu_dereference(scx_root); 4353 if (sch) { 4354 scx_claim_exit(sch, kind); 4355 kthread_queue_work(sch->helper, &sch->disable_work); 4356 } 4357 rcu_read_unlock(); 4358 } 4359 4360 static void dump_newline(struct seq_buf *s) 4361 { 4362 trace_sched_ext_dump(""); 4363 4364 /* @s may be zero sized and seq_buf triggers WARN if so */ 4365 if (s->size) 4366 seq_buf_putc(s, '\n'); 4367 } 4368 4369 static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...) 4370 { 4371 va_list args; 4372 4373 #ifdef CONFIG_TRACEPOINTS 4374 if (trace_sched_ext_dump_enabled()) { 4375 /* protected by scx_dump_state()::dump_lock */ 4376 static char line_buf[SCX_EXIT_MSG_LEN]; 4377 4378 va_start(args, fmt); 4379 vscnprintf(line_buf, sizeof(line_buf), fmt, args); 4380 va_end(args); 4381 4382 trace_sched_ext_dump(line_buf); 4383 } 4384 #endif 4385 /* @s may be zero sized and seq_buf triggers WARN if so */ 4386 if (s->size) { 4387 va_start(args, fmt); 4388 seq_buf_vprintf(s, fmt, args); 4389 va_end(args); 4390 4391 seq_buf_putc(s, '\n'); 4392 } 4393 } 4394 4395 static void dump_stack_trace(struct seq_buf *s, const char *prefix, 4396 const unsigned long *bt, unsigned int len) 4397 { 4398 unsigned int i; 4399 4400 for (i = 0; i < len; i++) 4401 dump_line(s, "%s%pS", prefix, (void *)bt[i]); 4402 } 4403 4404 static void ops_dump_init(struct seq_buf *s, const char *prefix) 4405 { 4406 struct scx_dump_data *dd = &scx_dump_data; 4407 4408 lockdep_assert_irqs_disabled(); 4409 4410 dd->cpu = smp_processor_id(); /* allow scx_bpf_dump() */ 4411 dd->first = true; 4412 dd->cursor = 0; 4413 dd->s = s; 4414 dd->prefix = prefix; 4415 } 4416 4417 static void ops_dump_flush(void) 4418 { 4419 struct scx_dump_data *dd = &scx_dump_data; 4420 char *line = dd->buf.line; 4421 4422 if (!dd->cursor) 4423 return; 4424 4425 /* 4426 * There's something to flush and this is the first line. Insert a blank 4427 * line to distinguish ops dump. 4428 */ 4429 if (dd->first) { 4430 dump_newline(dd->s); 4431 dd->first = false; 4432 } 4433 4434 /* 4435 * There may be multiple lines in $line. Scan and emit each line 4436 * separately. 
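 *
 * e.g. a single scx_bpf_dump() call from the BPF scheduler may contain
 * embedded newlines; each newline-separated segment gets its own prefixed
 * dump_line() below.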
4437 */ 4438 while (true) { 4439 char *end = line; 4440 char c; 4441 4442 while (*end != '\n' && *end != '\0') 4443 end++; 4444 4445 /* 4446 * If $line overflowed, it may not have newline at the end. 4447 * Always emit with a newline. 4448 */ 4449 c = *end; 4450 *end = '\0'; 4451 dump_line(dd->s, "%s%s", dd->prefix, line); 4452 if (c == '\0') 4453 break; 4454 4455 /* move to the next line */ 4456 end++; 4457 if (*end == '\0') 4458 break; 4459 line = end; 4460 } 4461 4462 dd->cursor = 0; 4463 } 4464 4465 static void ops_dump_exit(void) 4466 { 4467 ops_dump_flush(); 4468 scx_dump_data.cpu = -1; 4469 } 4470 4471 static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx, 4472 struct task_struct *p, char marker) 4473 { 4474 static unsigned long bt[SCX_EXIT_BT_LEN]; 4475 struct scx_sched *sch = scx_root; 4476 char dsq_id_buf[19] = "(n/a)"; 4477 unsigned long ops_state = atomic_long_read(&p->scx.ops_state); 4478 unsigned int bt_len = 0; 4479 4480 if (p->scx.dsq) 4481 scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx", 4482 (unsigned long long)p->scx.dsq->id); 4483 4484 dump_newline(s); 4485 dump_line(s, " %c%c %s[%d] %+ldms", 4486 marker, task_state_to_char(p), p->comm, p->pid, 4487 jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies)); 4488 dump_line(s, " scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu", 4489 scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK, 4490 p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK, 4491 ops_state >> SCX_OPSS_QSEQ_SHIFT); 4492 dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s", 4493 p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf); 4494 dump_line(s, " dsq_vtime=%llu slice=%llu weight=%u", 4495 p->scx.dsq_vtime, p->scx.slice, p->scx.weight); 4496 dump_line(s, " cpus=%*pb no_mig=%u", cpumask_pr_args(p->cpus_ptr), 4497 p->migration_disabled); 4498 4499 if (SCX_HAS_OP(sch, dump_task)) { 4500 ops_dump_init(s, " "); 4501 SCX_CALL_OP(sch, SCX_KF_REST, dump_task, NULL, dctx, p); 4502 ops_dump_exit(); 4503 } 4504 4505 #ifdef CONFIG_STACKTRACE 4506 bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1); 4507 #endif 4508 if (bt_len) { 4509 dump_newline(s); 4510 dump_stack_trace(s, " ", bt, bt_len); 4511 } 4512 } 4513 4514 static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len) 4515 { 4516 static DEFINE_SPINLOCK(dump_lock); 4517 static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n"; 4518 struct scx_sched *sch = scx_root; 4519 struct scx_dump_ctx dctx = { 4520 .kind = ei->kind, 4521 .exit_code = ei->exit_code, 4522 .reason = ei->reason, 4523 .at_ns = ktime_get_ns(), 4524 .at_jiffies = jiffies, 4525 }; 4526 struct seq_buf s; 4527 struct scx_event_stats events; 4528 unsigned long flags; 4529 char *buf; 4530 int cpu; 4531 4532 spin_lock_irqsave(&dump_lock, flags); 4533 4534 seq_buf_init(&s, ei->dump, dump_len); 4535 4536 if (ei->kind == SCX_EXIT_NONE) { 4537 dump_line(&s, "Debug dump triggered by %s", ei->reason); 4538 } else { 4539 dump_line(&s, "%s[%d] triggered exit kind %d:", 4540 current->comm, current->pid, ei->kind); 4541 dump_line(&s, " %s (%s)", ei->reason, ei->msg); 4542 dump_newline(&s); 4543 dump_line(&s, "Backtrace:"); 4544 dump_stack_trace(&s, " ", ei->bt, ei->bt_len); 4545 } 4546 4547 if (SCX_HAS_OP(sch, dump)) { 4548 ops_dump_init(&s, ""); 4549 SCX_CALL_OP(sch, SCX_KF_UNLOCKED, dump, NULL, &dctx); 4550 ops_dump_exit(); 4551 } 4552 4553 dump_newline(&s); 4554 dump_line(&s, "CPU states"); 4555 dump_line(&s, "----------"); 4556 4557 for_each_possible_cpu(cpu) { 4558 struct rq *rq = cpu_rq(cpu); 4559 
struct rq_flags rf; 4560 struct task_struct *p; 4561 struct seq_buf ns; 4562 size_t avail, used; 4563 bool idle; 4564 4565 rq_lock_irqsave(rq, &rf); 4566 4567 idle = list_empty(&rq->scx.runnable_list) && 4568 rq->curr->sched_class == &idle_sched_class; 4569 4570 if (idle && !SCX_HAS_OP(sch, dump_cpu)) 4571 goto next; 4572 4573 /* 4574 * We don't yet know whether ops.dump_cpu() will produce output 4575 * and we may want to skip the default CPU dump if it doesn't. 4576 * Use a nested seq_buf to generate the standard dump so that we 4577 * can decide whether to commit later. 4578 */ 4579 avail = seq_buf_get_buf(&s, &buf); 4580 seq_buf_init(&ns, buf, avail); 4581 4582 dump_newline(&ns); 4583 dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu ksync=%lu", 4584 cpu, rq->scx.nr_running, rq->scx.flags, 4585 rq->scx.cpu_released, rq->scx.ops_qseq, 4586 rq->scx.kick_sync); 4587 dump_line(&ns, " curr=%s[%d] class=%ps", 4588 rq->curr->comm, rq->curr->pid, 4589 rq->curr->sched_class); 4590 if (!cpumask_empty(rq->scx.cpus_to_kick)) 4591 dump_line(&ns, " cpus_to_kick : %*pb", 4592 cpumask_pr_args(rq->scx.cpus_to_kick)); 4593 if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle)) 4594 dump_line(&ns, " idle_to_kick : %*pb", 4595 cpumask_pr_args(rq->scx.cpus_to_kick_if_idle)); 4596 if (!cpumask_empty(rq->scx.cpus_to_preempt)) 4597 dump_line(&ns, " cpus_to_preempt: %*pb", 4598 cpumask_pr_args(rq->scx.cpus_to_preempt)); 4599 if (!cpumask_empty(rq->scx.cpus_to_wait)) 4600 dump_line(&ns, " cpus_to_wait : %*pb", 4601 cpumask_pr_args(rq->scx.cpus_to_wait)); 4602 4603 used = seq_buf_used(&ns); 4604 if (SCX_HAS_OP(sch, dump_cpu)) { 4605 ops_dump_init(&ns, " "); 4606 SCX_CALL_OP(sch, SCX_KF_REST, dump_cpu, NULL, 4607 &dctx, cpu, idle); 4608 ops_dump_exit(); 4609 } 4610 4611 /* 4612 * If idle && nothing generated by ops.dump_cpu(), there's 4613 * nothing interesting. Skip. 4614 */ 4615 if (idle && used == seq_buf_used(&ns)) 4616 goto next; 4617 4618 /* 4619 * $s may already have overflowed when $ns was created. If so, 4620 * calling commit on it will trigger BUG. 
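 * In that case seq_buf_get_buf() above returned 0 for @avail, which is why
 * the commit below is gated on @avail being non-zero.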
4621 */ 4622 if (avail) { 4623 seq_buf_commit(&s, seq_buf_used(&ns)); 4624 if (seq_buf_has_overflowed(&ns)) 4625 seq_buf_set_overflow(&s); 4626 } 4627 4628 if (rq->curr->sched_class == &ext_sched_class) 4629 scx_dump_task(&s, &dctx, rq->curr, '*'); 4630 4631 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) 4632 scx_dump_task(&s, &dctx, p, ' '); 4633 next: 4634 rq_unlock_irqrestore(rq, &rf); 4635 } 4636 4637 dump_newline(&s); 4638 dump_line(&s, "Event counters"); 4639 dump_line(&s, "--------------"); 4640 4641 scx_read_events(sch, &events); 4642 scx_dump_event(s, &events, SCX_EV_SELECT_CPU_FALLBACK); 4643 scx_dump_event(s, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE); 4644 scx_dump_event(s, &events, SCX_EV_DISPATCH_KEEP_LAST); 4645 scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_EXITING); 4646 scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED); 4647 scx_dump_event(s, &events, SCX_EV_REFILL_SLICE_DFL); 4648 scx_dump_event(s, &events, SCX_EV_BYPASS_DURATION); 4649 scx_dump_event(s, &events, SCX_EV_BYPASS_DISPATCH); 4650 scx_dump_event(s, &events, SCX_EV_BYPASS_ACTIVATE); 4651 4652 if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker)) 4653 memcpy(ei->dump + dump_len - sizeof(trunc_marker), 4654 trunc_marker, sizeof(trunc_marker)); 4655 4656 spin_unlock_irqrestore(&dump_lock, flags); 4657 } 4658 4659 static void scx_error_irq_workfn(struct irq_work *irq_work) 4660 { 4661 struct scx_sched *sch = container_of(irq_work, struct scx_sched, error_irq_work); 4662 struct scx_exit_info *ei = sch->exit_info; 4663 4664 if (ei->kind >= SCX_EXIT_ERROR) 4665 scx_dump_state(ei, sch->ops.exit_dump_len); 4666 4667 kthread_queue_work(sch->helper, &sch->disable_work); 4668 } 4669 4670 static bool scx_vexit(struct scx_sched *sch, 4671 enum scx_exit_kind kind, s64 exit_code, 4672 const char *fmt, va_list args) 4673 { 4674 struct scx_exit_info *ei = sch->exit_info; 4675 4676 if (!scx_claim_exit(sch, kind)) 4677 return false; 4678 4679 ei->exit_code = exit_code; 4680 #ifdef CONFIG_STACKTRACE 4681 if (kind >= SCX_EXIT_ERROR) 4682 ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1); 4683 #endif 4684 vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args); 4685 4686 /* 4687 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again 4688 * in scx_disable_workfn(). 4689 */ 4690 ei->kind = kind; 4691 ei->reason = scx_exit_reason(ei->kind); 4692 4693 irq_work_queue(&sch->error_irq_work); 4694 return true; 4695 } 4696 4697 static int alloc_kick_syncs(void) 4698 { 4699 int cpu; 4700 4701 /* 4702 * Allocate per-CPU arrays sized by nr_cpu_ids. Use kvzalloc as size 4703 * can exceed percpu allocator limits on large machines. 
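 *
 * e.g. with 8192 possible CPUs, each per-CPU array is
 * 8192 * sizeof(unsigned long) == 64KB on 64-bit, more than the percpu
 * allocator will typically serve in a single allocation.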
4704 */ 4705 for_each_possible_cpu(cpu) { 4706 struct scx_kick_syncs **ksyncs = per_cpu_ptr(&scx_kick_syncs, cpu); 4707 struct scx_kick_syncs *new_ksyncs; 4708 4709 WARN_ON_ONCE(rcu_access_pointer(*ksyncs)); 4710 4711 new_ksyncs = kvzalloc_node(struct_size(new_ksyncs, syncs, nr_cpu_ids), 4712 GFP_KERNEL, cpu_to_node(cpu)); 4713 if (!new_ksyncs) { 4714 free_kick_syncs(); 4715 return -ENOMEM; 4716 } 4717 4718 rcu_assign_pointer(*ksyncs, new_ksyncs); 4719 } 4720 4721 return 0; 4722 } 4723 4724 static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops) 4725 { 4726 struct scx_sched *sch; 4727 int node, ret; 4728 4729 sch = kzalloc(sizeof(*sch), GFP_KERNEL); 4730 if (!sch) 4731 return ERR_PTR(-ENOMEM); 4732 4733 sch->exit_info = alloc_exit_info(ops->exit_dump_len); 4734 if (!sch->exit_info) { 4735 ret = -ENOMEM; 4736 goto err_free_sch; 4737 } 4738 4739 ret = rhashtable_init(&sch->dsq_hash, &dsq_hash_params); 4740 if (ret < 0) 4741 goto err_free_ei; 4742 4743 sch->global_dsqs = kcalloc(nr_node_ids, sizeof(sch->global_dsqs[0]), 4744 GFP_KERNEL); 4745 if (!sch->global_dsqs) { 4746 ret = -ENOMEM; 4747 goto err_free_hash; 4748 } 4749 4750 for_each_node_state(node, N_POSSIBLE) { 4751 struct scx_dispatch_q *dsq; 4752 4753 dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node); 4754 if (!dsq) { 4755 ret = -ENOMEM; 4756 goto err_free_gdsqs; 4757 } 4758 4759 init_dsq(dsq, SCX_DSQ_GLOBAL); 4760 sch->global_dsqs[node] = dsq; 4761 } 4762 4763 sch->pcpu = alloc_percpu(struct scx_sched_pcpu); 4764 if (!sch->pcpu) 4765 goto err_free_gdsqs; 4766 4767 sch->helper = kthread_run_worker(0, "sched_ext_helper"); 4768 if (IS_ERR(sch->helper)) { 4769 ret = PTR_ERR(sch->helper); 4770 goto err_free_pcpu; 4771 } 4772 4773 sched_set_fifo(sch->helper->task); 4774 4775 atomic_set(&sch->exit_kind, SCX_EXIT_NONE); 4776 init_irq_work(&sch->error_irq_work, scx_error_irq_workfn); 4777 kthread_init_work(&sch->disable_work, scx_disable_workfn); 4778 sch->ops = *ops; 4779 ops->priv = sch; 4780 4781 sch->kobj.kset = scx_kset; 4782 ret = kobject_init_and_add(&sch->kobj, &scx_ktype, NULL, "root"); 4783 if (ret < 0) 4784 goto err_stop_helper; 4785 4786 return sch; 4787 4788 err_stop_helper: 4789 kthread_stop(sch->helper->task); 4790 err_free_pcpu: 4791 free_percpu(sch->pcpu); 4792 err_free_gdsqs: 4793 for_each_node_state(node, N_POSSIBLE) 4794 kfree(sch->global_dsqs[node]); 4795 kfree(sch->global_dsqs); 4796 err_free_hash: 4797 rhashtable_free_and_destroy(&sch->dsq_hash, NULL, NULL); 4798 err_free_ei: 4799 free_exit_info(sch->exit_info); 4800 err_free_sch: 4801 kfree(sch); 4802 return ERR_PTR(ret); 4803 } 4804 4805 static int check_hotplug_seq(struct scx_sched *sch, 4806 const struct sched_ext_ops *ops) 4807 { 4808 unsigned long long global_hotplug_seq; 4809 4810 /* 4811 * If a hotplug event has occurred between when a scheduler was 4812 * initialized, and when we were able to attach, exit and notify user 4813 * space about it. 
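 *
 * Userspace loaders are expected to sample /sys/kernel/sched_ext/hotplug_seq
 * before loading and pass the value in ops->hotplug_seq so that a CPU coming
 * or going in that window is detected here; the %SCX_ECODE_ACT_RESTART exit
 * code below tells the loader it can simply retry.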
4814 */ 4815 if (ops->hotplug_seq) { 4816 global_hotplug_seq = atomic_long_read(&scx_hotplug_seq); 4817 if (ops->hotplug_seq != global_hotplug_seq) { 4818 scx_exit(sch, SCX_EXIT_UNREG_KERN, 4819 SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG, 4820 "expected hotplug seq %llu did not match actual %llu", 4821 ops->hotplug_seq, global_hotplug_seq); 4822 return -EBUSY; 4823 } 4824 } 4825 4826 return 0; 4827 } 4828 4829 static int validate_ops(struct scx_sched *sch, const struct sched_ext_ops *ops) 4830 { 4831 /* 4832 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the 4833 * ops.enqueue() callback isn't implemented. 4834 */ 4835 if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) { 4836 scx_error(sch, "SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented"); 4837 return -EINVAL; 4838 } 4839 4840 /* 4841 * SCX_OPS_BUILTIN_IDLE_PER_NODE requires built-in CPU idle 4842 * selection policy to be enabled. 4843 */ 4844 if ((ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE) && 4845 (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))) { 4846 scx_error(sch, "SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled"); 4847 return -EINVAL; 4848 } 4849 4850 if (ops->flags & SCX_OPS_HAS_CGROUP_WEIGHT) 4851 pr_warn("SCX_OPS_HAS_CGROUP_WEIGHT is deprecated and a noop\n"); 4852 4853 if (ops->cpu_acquire || ops->cpu_release) 4854 pr_warn("ops->cpu_acquire/release() are deprecated, use sched_switch TP instead\n"); 4855 4856 return 0; 4857 } 4858 4859 static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link) 4860 { 4861 struct scx_sched *sch; 4862 struct scx_task_iter sti; 4863 struct task_struct *p; 4864 unsigned long timeout; 4865 int i, cpu, ret; 4866 4867 if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN), 4868 cpu_possible_mask)) { 4869 pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n"); 4870 return -EINVAL; 4871 } 4872 4873 mutex_lock(&scx_enable_mutex); 4874 4875 if (scx_enable_state() != SCX_DISABLED) { 4876 ret = -EBUSY; 4877 goto err_unlock; 4878 } 4879 4880 ret = alloc_kick_syncs(); 4881 if (ret) 4882 goto err_unlock; 4883 4884 sch = scx_alloc_and_add_sched(ops); 4885 if (IS_ERR(sch)) { 4886 ret = PTR_ERR(sch); 4887 goto err_free_ksyncs; 4888 } 4889 4890 /* 4891 * Transition to ENABLING and clear exit info to arm the disable path. 4892 * Failure triggers full disabling from here on. 4893 */ 4894 WARN_ON_ONCE(scx_set_enable_state(SCX_ENABLING) != SCX_DISABLED); 4895 WARN_ON_ONCE(scx_root); 4896 if (WARN_ON_ONCE(READ_ONCE(scx_aborting))) 4897 WRITE_ONCE(scx_aborting, false); 4898 4899 atomic_long_set(&scx_nr_rejected, 0); 4900 4901 for_each_possible_cpu(cpu) 4902 cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE; 4903 4904 /* 4905 * Keep CPUs stable during enable so that the BPF scheduler can track 4906 * online CPUs by watching ->on/offline_cpu() after ->init(). 4907 */ 4908 cpus_read_lock(); 4909 4910 /* 4911 * Make the scheduler instance visible. Must be inside cpus_read_lock(). 4912 * See handle_hotplug(). 
4913 */ 4914 rcu_assign_pointer(scx_root, sch); 4915 4916 scx_idle_enable(ops); 4917 4918 if (sch->ops.init) { 4919 ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, init, NULL); 4920 if (ret) { 4921 ret = ops_sanitize_err(sch, "init", ret); 4922 cpus_read_unlock(); 4923 scx_error(sch, "ops.init() failed (%d)", ret); 4924 goto err_disable; 4925 } 4926 sch->exit_info->flags |= SCX_EFLAG_INITIALIZED; 4927 } 4928 4929 for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++) 4930 if (((void (**)(void))ops)[i]) 4931 set_bit(i, sch->has_op); 4932 4933 ret = check_hotplug_seq(sch, ops); 4934 if (ret) { 4935 cpus_read_unlock(); 4936 goto err_disable; 4937 } 4938 scx_idle_update_selcpu_topology(ops); 4939 4940 cpus_read_unlock(); 4941 4942 ret = validate_ops(sch, ops); 4943 if (ret) 4944 goto err_disable; 4945 4946 WARN_ON_ONCE(scx_dsp_ctx); 4947 scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH; 4948 scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf, 4949 scx_dsp_max_batch), 4950 __alignof__(struct scx_dsp_ctx)); 4951 if (!scx_dsp_ctx) { 4952 ret = -ENOMEM; 4953 goto err_disable; 4954 } 4955 4956 if (ops->timeout_ms) 4957 timeout = msecs_to_jiffies(ops->timeout_ms); 4958 else 4959 timeout = SCX_WATCHDOG_MAX_TIMEOUT; 4960 4961 WRITE_ONCE(scx_watchdog_timeout, timeout); 4962 WRITE_ONCE(scx_watchdog_timestamp, jiffies); 4963 queue_delayed_work(system_unbound_wq, &scx_watchdog_work, 4964 scx_watchdog_timeout / 2); 4965 4966 /* 4967 * Once __scx_enabled is set, %current can be switched to SCX anytime. 4968 * This can lead to stalls as some BPF schedulers (e.g. userspace 4969 * scheduling) may not function correctly before all tasks are switched. 4970 * Init in bypass mode to guarantee forward progress. 4971 */ 4972 scx_bypass(true); 4973 4974 for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++) 4975 if (((void (**)(void))ops)[i]) 4976 set_bit(i, sch->has_op); 4977 4978 if (sch->ops.cpu_acquire || sch->ops.cpu_release) 4979 sch->ops.flags |= SCX_OPS_HAS_CPU_PREEMPT; 4980 4981 /* 4982 * Lock out forks, cgroup on/offlining and moves before opening the 4983 * floodgate so that they don't wander into the operations prematurely. 4984 */ 4985 percpu_down_write(&scx_fork_rwsem); 4986 4987 WARN_ON_ONCE(scx_init_task_enabled); 4988 scx_init_task_enabled = true; 4989 4990 /* 4991 * Enable ops for every task. Fork is excluded by scx_fork_rwsem 4992 * preventing new tasks from being added. No need to exclude tasks 4993 * leaving as sched_ext_free() can handle both prepped and enabled 4994 * tasks. Prep all tasks first and then enable them with preemption 4995 * disabled. 4996 * 4997 * All cgroups should be initialized before scx_init_task() so that the 4998 * BPF scheduler can reliably track each task's cgroup membership from 4999 * scx_init_task(). Lock out cgroup on/offlining and task migrations 5000 * while tasks are being initialized so that scx_cgroup_can_attach() 5001 * never sees uninitialized tasks. 5002 */ 5003 scx_cgroup_lock(); 5004 ret = scx_cgroup_init(sch); 5005 if (ret) 5006 goto err_disable_unlock_all; 5007 5008 scx_task_iter_start(&sti); 5009 while ((p = scx_task_iter_next_locked(&sti))) { 5010 /* 5011 * @p may already be dead, have lost all its usages counts and 5012 * be waiting for RCU grace period before being freed. @p can't 5013 * be initialized for SCX in such cases and should be ignored. 
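 * (tryget_task_struct() below returns %false exactly in that case, so such
 * tasks are simply skipped.)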
5014 */ 5015 if (!tryget_task_struct(p)) 5016 continue; 5017 5018 scx_task_iter_unlock(&sti); 5019 5020 ret = scx_init_task(p, task_group(p), false); 5021 if (ret) { 5022 put_task_struct(p); 5023 scx_task_iter_stop(&sti); 5024 scx_error(sch, "ops.init_task() failed (%d) for %s[%d]", 5025 ret, p->comm, p->pid); 5026 goto err_disable_unlock_all; 5027 } 5028 5029 scx_set_task_state(p, SCX_TASK_READY); 5030 5031 put_task_struct(p); 5032 } 5033 scx_task_iter_stop(&sti); 5034 scx_cgroup_unlock(); 5035 percpu_up_write(&scx_fork_rwsem); 5036 5037 /* 5038 * All tasks are READY. It's safe to turn on scx_enabled() and switch 5039 * all eligible tasks. 5040 */ 5041 WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL)); 5042 static_branch_enable(&__scx_enabled); 5043 5044 /* 5045 * We're fully committed and can't fail. The task READY -> ENABLED 5046 * transitions here are synchronized against sched_ext_free() through 5047 * scx_tasks_lock. 5048 */ 5049 percpu_down_write(&scx_fork_rwsem); 5050 scx_task_iter_start(&sti); 5051 while ((p = scx_task_iter_next_locked(&sti))) { 5052 unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE; 5053 const struct sched_class *old_class = p->sched_class; 5054 const struct sched_class *new_class = scx_setscheduler_class(p); 5055 5056 if (scx_get_task_state(p) != SCX_TASK_READY) 5057 continue; 5058 5059 if (old_class != new_class) 5060 queue_flags |= DEQUEUE_CLASS; 5061 5062 scoped_guard (sched_change, p, queue_flags) { 5063 p->scx.slice = READ_ONCE(scx_slice_dfl); 5064 p->sched_class = new_class; 5065 } 5066 } 5067 scx_task_iter_stop(&sti); 5068 percpu_up_write(&scx_fork_rwsem); 5069 5070 scx_bypass(false); 5071 5072 if (!scx_tryset_enable_state(SCX_ENABLED, SCX_ENABLING)) { 5073 WARN_ON_ONCE(atomic_read(&sch->exit_kind) == SCX_EXIT_NONE); 5074 goto err_disable; 5075 } 5076 5077 if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL)) 5078 static_branch_enable(&__scx_switched_all); 5079 5080 pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n", 5081 sch->ops.name, scx_switched_all() ? "" : " (partial)"); 5082 kobject_uevent(&sch->kobj, KOBJ_ADD); 5083 mutex_unlock(&scx_enable_mutex); 5084 5085 atomic_long_inc(&scx_enable_seq); 5086 5087 return 0; 5088 5089 err_free_ksyncs: 5090 free_kick_syncs(); 5091 err_unlock: 5092 mutex_unlock(&scx_enable_mutex); 5093 return ret; 5094 5095 err_disable_unlock_all: 5096 scx_cgroup_unlock(); 5097 percpu_up_write(&scx_fork_rwsem); 5098 /* we'll soon enter disable path, keep bypass on */ 5099 err_disable: 5100 mutex_unlock(&scx_enable_mutex); 5101 /* 5102 * Returning an error code here would not pass all the error information 5103 * to userspace. Record errno using scx_error() for cases scx_error() 5104 * wasn't already invoked and exit indicating success so that the error 5105 * is notified through ops.exit() with all the details. 5106 * 5107 * Flush scx_disable_work to ensure that error is reported before init 5108 * completion. sch's base reference will be put by bpf_scx_unreg(). 5109 */ 5110 scx_error(sch, "scx_enable() failed (%d)", ret); 5111 kthread_flush_work(&sch->disable_work); 5112 return 0; 5113 } 5114 5115 5116 /******************************************************************************** 5117 * bpf_struct_ops plumbing. 
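 *
 * This is the glue that exposes struct sched_ext_ops as a bpf_struct_ops:
 * verifier access rules, member init/validation and the reg/unreg hooks that
 * call into scx_enable() and scx_disable().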
5118 */ 5119 #include <linux/bpf_verifier.h> 5120 #include <linux/bpf.h> 5121 #include <linux/btf.h> 5122 5123 static const struct btf_type *task_struct_type; 5124 5125 static bool bpf_scx_is_valid_access(int off, int size, 5126 enum bpf_access_type type, 5127 const struct bpf_prog *prog, 5128 struct bpf_insn_access_aux *info) 5129 { 5130 if (type != BPF_READ) 5131 return false; 5132 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) 5133 return false; 5134 if (off % size != 0) 5135 return false; 5136 5137 return btf_ctx_access(off, size, type, prog, info); 5138 } 5139 5140 static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log, 5141 const struct bpf_reg_state *reg, int off, 5142 int size) 5143 { 5144 const struct btf_type *t; 5145 5146 t = btf_type_by_id(reg->btf, reg->btf_id); 5147 if (t == task_struct_type) { 5148 if (off >= offsetof(struct task_struct, scx.slice) && 5149 off + size <= offsetofend(struct task_struct, scx.slice)) 5150 return SCALAR_VALUE; 5151 if (off >= offsetof(struct task_struct, scx.dsq_vtime) && 5152 off + size <= offsetofend(struct task_struct, scx.dsq_vtime)) 5153 return SCALAR_VALUE; 5154 if (off >= offsetof(struct task_struct, scx.disallow) && 5155 off + size <= offsetofend(struct task_struct, scx.disallow)) 5156 return SCALAR_VALUE; 5157 } 5158 5159 return -EACCES; 5160 } 5161 5162 static const struct bpf_verifier_ops bpf_scx_verifier_ops = { 5163 .get_func_proto = bpf_base_func_proto, 5164 .is_valid_access = bpf_scx_is_valid_access, 5165 .btf_struct_access = bpf_scx_btf_struct_access, 5166 }; 5167 5168 static int bpf_scx_init_member(const struct btf_type *t, 5169 const struct btf_member *member, 5170 void *kdata, const void *udata) 5171 { 5172 const struct sched_ext_ops *uops = udata; 5173 struct sched_ext_ops *ops = kdata; 5174 u32 moff = __btf_member_bit_offset(t, member) / 8; 5175 int ret; 5176 5177 switch (moff) { 5178 case offsetof(struct sched_ext_ops, dispatch_max_batch): 5179 if (*(u32 *)(udata + moff) > INT_MAX) 5180 return -E2BIG; 5181 ops->dispatch_max_batch = *(u32 *)(udata + moff); 5182 return 1; 5183 case offsetof(struct sched_ext_ops, flags): 5184 if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS) 5185 return -EINVAL; 5186 ops->flags = *(u64 *)(udata + moff); 5187 return 1; 5188 case offsetof(struct sched_ext_ops, name): 5189 ret = bpf_obj_name_cpy(ops->name, uops->name, 5190 sizeof(ops->name)); 5191 if (ret < 0) 5192 return ret; 5193 if (ret == 0) 5194 return -EINVAL; 5195 return 1; 5196 case offsetof(struct sched_ext_ops, timeout_ms): 5197 if (msecs_to_jiffies(*(u32 *)(udata + moff)) > 5198 SCX_WATCHDOG_MAX_TIMEOUT) 5199 return -E2BIG; 5200 ops->timeout_ms = *(u32 *)(udata + moff); 5201 return 1; 5202 case offsetof(struct sched_ext_ops, exit_dump_len): 5203 ops->exit_dump_len = 5204 *(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN; 5205 return 1; 5206 case offsetof(struct sched_ext_ops, hotplug_seq): 5207 ops->hotplug_seq = *(u64 *)(udata + moff); 5208 return 1; 5209 } 5210 5211 return 0; 5212 } 5213 5214 static int bpf_scx_check_member(const struct btf_type *t, 5215 const struct btf_member *member, 5216 const struct bpf_prog *prog) 5217 { 5218 u32 moff = __btf_member_bit_offset(t, member) / 8; 5219 5220 switch (moff) { 5221 case offsetof(struct sched_ext_ops, init_task): 5222 #ifdef CONFIG_EXT_GROUP_SCHED 5223 case offsetof(struct sched_ext_ops, cgroup_init): 5224 case offsetof(struct sched_ext_ops, cgroup_exit): 5225 case offsetof(struct sched_ext_ops, cgroup_prep_move): 5226 #endif 5227 case offsetof(struct sched_ext_ops, 
cpu_online): 5228 case offsetof(struct sched_ext_ops, cpu_offline): 5229 case offsetof(struct sched_ext_ops, init): 5230 case offsetof(struct sched_ext_ops, exit): 5231 break; 5232 default: 5233 if (prog->sleepable) 5234 return -EINVAL; 5235 } 5236 5237 return 0; 5238 } 5239 5240 static int bpf_scx_reg(void *kdata, struct bpf_link *link) 5241 { 5242 return scx_enable(kdata, link); 5243 } 5244 5245 static void bpf_scx_unreg(void *kdata, struct bpf_link *link) 5246 { 5247 struct sched_ext_ops *ops = kdata; 5248 struct scx_sched *sch = ops->priv; 5249 5250 scx_disable(SCX_EXIT_UNREG); 5251 kthread_flush_work(&sch->disable_work); 5252 kobject_put(&sch->kobj); 5253 } 5254 5255 static int bpf_scx_init(struct btf *btf) 5256 { 5257 task_struct_type = btf_type_by_id(btf, btf_tracing_ids[BTF_TRACING_TYPE_TASK]); 5258 5259 return 0; 5260 } 5261 5262 static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link) 5263 { 5264 /* 5265 * sched_ext does not support updating the actively-loaded BPF 5266 * scheduler, as registering a BPF scheduler can always fail if the 5267 * scheduler returns an error code for e.g. ops.init(), ops.init_task(), 5268 * etc. Similarly, we can always race with unregistration happening 5269 * elsewhere, such as with sysrq. 5270 */ 5271 return -EOPNOTSUPP; 5272 } 5273 5274 static int bpf_scx_validate(void *kdata) 5275 { 5276 return 0; 5277 } 5278 5279 static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; } 5280 static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {} 5281 static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {} 5282 static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *prev__nullable) {} 5283 static void sched_ext_ops__tick(struct task_struct *p) {} 5284 static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {} 5285 static void sched_ext_ops__running(struct task_struct *p) {} 5286 static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {} 5287 static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {} 5288 static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to__nullable) { return false; } 5289 static bool sched_ext_ops__core_sched_before(struct task_struct *a, struct task_struct *b) { return false; } 5290 static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {} 5291 static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {} 5292 static void sched_ext_ops__update_idle(s32 cpu, bool idle) {} 5293 static void sched_ext_ops__cpu_acquire(s32 cpu, struct scx_cpu_acquire_args *args) {} 5294 static void sched_ext_ops__cpu_release(s32 cpu, struct scx_cpu_release_args *args) {} 5295 static s32 sched_ext_ops__init_task(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; } 5296 static void sched_ext_ops__exit_task(struct task_struct *p, struct scx_exit_task_args *args) {} 5297 static void sched_ext_ops__enable(struct task_struct *p) {} 5298 static void sched_ext_ops__disable(struct task_struct *p) {} 5299 #ifdef CONFIG_EXT_GROUP_SCHED 5300 static s32 sched_ext_ops__cgroup_init(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; } 5301 static void sched_ext_ops__cgroup_exit(struct cgroup *cgrp) {} 5302 static s32 sched_ext_ops__cgroup_prep_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; } 5303 static void sched_ext_ops__cgroup_move(struct 
task_struct *p, struct cgroup *from, struct cgroup *to) {} 5304 static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {} 5305 static void sched_ext_ops__cgroup_set_weight(struct cgroup *cgrp, u32 weight) {} 5306 static void sched_ext_ops__cgroup_set_bandwidth(struct cgroup *cgrp, u64 period_us, u64 quota_us, u64 burst_us) {} 5307 static void sched_ext_ops__cgroup_set_idle(struct cgroup *cgrp, bool idle) {} 5308 #endif 5309 static void sched_ext_ops__cpu_online(s32 cpu) {} 5310 static void sched_ext_ops__cpu_offline(s32 cpu) {} 5311 static s32 sched_ext_ops__init(void) { return -EINVAL; } 5312 static void sched_ext_ops__exit(struct scx_exit_info *info) {} 5313 static void sched_ext_ops__dump(struct scx_dump_ctx *ctx) {} 5314 static void sched_ext_ops__dump_cpu(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {} 5315 static void sched_ext_ops__dump_task(struct scx_dump_ctx *ctx, struct task_struct *p) {} 5316 5317 static struct sched_ext_ops __bpf_ops_sched_ext_ops = { 5318 .select_cpu = sched_ext_ops__select_cpu, 5319 .enqueue = sched_ext_ops__enqueue, 5320 .dequeue = sched_ext_ops__dequeue, 5321 .dispatch = sched_ext_ops__dispatch, 5322 .tick = sched_ext_ops__tick, 5323 .runnable = sched_ext_ops__runnable, 5324 .running = sched_ext_ops__running, 5325 .stopping = sched_ext_ops__stopping, 5326 .quiescent = sched_ext_ops__quiescent, 5327 .yield = sched_ext_ops__yield, 5328 .core_sched_before = sched_ext_ops__core_sched_before, 5329 .set_weight = sched_ext_ops__set_weight, 5330 .set_cpumask = sched_ext_ops__set_cpumask, 5331 .update_idle = sched_ext_ops__update_idle, 5332 .cpu_acquire = sched_ext_ops__cpu_acquire, 5333 .cpu_release = sched_ext_ops__cpu_release, 5334 .init_task = sched_ext_ops__init_task, 5335 .exit_task = sched_ext_ops__exit_task, 5336 .enable = sched_ext_ops__enable, 5337 .disable = sched_ext_ops__disable, 5338 #ifdef CONFIG_EXT_GROUP_SCHED 5339 .cgroup_init = sched_ext_ops__cgroup_init, 5340 .cgroup_exit = sched_ext_ops__cgroup_exit, 5341 .cgroup_prep_move = sched_ext_ops__cgroup_prep_move, 5342 .cgroup_move = sched_ext_ops__cgroup_move, 5343 .cgroup_cancel_move = sched_ext_ops__cgroup_cancel_move, 5344 .cgroup_set_weight = sched_ext_ops__cgroup_set_weight, 5345 .cgroup_set_bandwidth = sched_ext_ops__cgroup_set_bandwidth, 5346 .cgroup_set_idle = sched_ext_ops__cgroup_set_idle, 5347 #endif 5348 .cpu_online = sched_ext_ops__cpu_online, 5349 .cpu_offline = sched_ext_ops__cpu_offline, 5350 .init = sched_ext_ops__init, 5351 .exit = sched_ext_ops__exit, 5352 .dump = sched_ext_ops__dump, 5353 .dump_cpu = sched_ext_ops__dump_cpu, 5354 .dump_task = sched_ext_ops__dump_task, 5355 }; 5356 5357 static struct bpf_struct_ops bpf_sched_ext_ops = { 5358 .verifier_ops = &bpf_scx_verifier_ops, 5359 .reg = bpf_scx_reg, 5360 .unreg = bpf_scx_unreg, 5361 .check_member = bpf_scx_check_member, 5362 .init_member = bpf_scx_init_member, 5363 .init = bpf_scx_init, 5364 .update = bpf_scx_update, 5365 .validate = bpf_scx_validate, 5366 .name = "sched_ext_ops", 5367 .owner = THIS_MODULE, 5368 .cfi_stubs = &__bpf_ops_sched_ext_ops 5369 }; 5370 5371 5372 /******************************************************************************** 5373 * System integration and init. 
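 *
 * Covers the sysrq handlers, the irq_work based CPU kicking machinery,
 * print_scx_info(), the PM notifier and init_sched_ext_class().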
5374 */ 5375 5376 static void sysrq_handle_sched_ext_reset(u8 key) 5377 { 5378 scx_disable(SCX_EXIT_SYSRQ); 5379 } 5380 5381 static const struct sysrq_key_op sysrq_sched_ext_reset_op = { 5382 .handler = sysrq_handle_sched_ext_reset, 5383 .help_msg = "reset-sched-ext(S)", 5384 .action_msg = "Disable sched_ext and revert all tasks to CFS", 5385 .enable_mask = SYSRQ_ENABLE_RTNICE, 5386 }; 5387 5388 static void sysrq_handle_sched_ext_dump(u8 key) 5389 { 5390 struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" }; 5391 5392 if (scx_enabled()) 5393 scx_dump_state(&ei, 0); 5394 } 5395 5396 static const struct sysrq_key_op sysrq_sched_ext_dump_op = { 5397 .handler = sysrq_handle_sched_ext_dump, 5398 .help_msg = "dump-sched-ext(D)", 5399 .action_msg = "Trigger sched_ext debug dump", 5400 .enable_mask = SYSRQ_ENABLE_RTNICE, 5401 }; 5402 5403 static bool can_skip_idle_kick(struct rq *rq) 5404 { 5405 lockdep_assert_rq_held(rq); 5406 5407 /* 5408 * We can skip idle kicking if @rq is going to go through at least one 5409 * full SCX scheduling cycle before going idle. Just checking whether 5410 * curr is not idle is insufficient because we could be racing 5411 * balance_one() trying to pull the next task from a remote rq, which 5412 * may fail, and @rq may become idle afterwards. 5413 * 5414 * The race window is small and we don't and can't guarantee that @rq is 5415 * only kicked while idle anyway. Skip only when sure. 5416 */ 5417 return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE); 5418 } 5419 5420 static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *ksyncs) 5421 { 5422 struct rq *rq = cpu_rq(cpu); 5423 struct scx_rq *this_scx = &this_rq->scx; 5424 const struct sched_class *cur_class; 5425 bool should_wait = false; 5426 unsigned long flags; 5427 5428 raw_spin_rq_lock_irqsave(rq, flags); 5429 cur_class = rq->curr->sched_class; 5430 5431 /* 5432 * During CPU hotplug, a CPU may depend on kicking itself to make 5433 * forward progress. Allow kicking self regardless of online state. If 5434 * @cpu is running a higher class task, we have no control over @cpu. 5435 * Skip kicking. 
5436 */ 5437 if ((cpu_online(cpu) || cpu == cpu_of(this_rq)) && 5438 !sched_class_above(cur_class, &ext_sched_class)) { 5439 if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) { 5440 if (cur_class == &ext_sched_class) 5441 rq->curr->scx.slice = 0; 5442 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt); 5443 } 5444 5445 if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) { 5446 if (cur_class == &ext_sched_class) { 5447 ksyncs[cpu] = rq->scx.kick_sync; 5448 should_wait = true; 5449 } else { 5450 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait); 5451 } 5452 } 5453 5454 resched_curr(rq); 5455 } else { 5456 cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt); 5457 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait); 5458 } 5459 5460 raw_spin_rq_unlock_irqrestore(rq, flags); 5461 5462 return should_wait; 5463 } 5464 5465 static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq) 5466 { 5467 struct rq *rq = cpu_rq(cpu); 5468 unsigned long flags; 5469 5470 raw_spin_rq_lock_irqsave(rq, flags); 5471 5472 if (!can_skip_idle_kick(rq) && 5473 (cpu_online(cpu) || cpu == cpu_of(this_rq))) 5474 resched_curr(rq); 5475 5476 raw_spin_rq_unlock_irqrestore(rq, flags); 5477 } 5478 5479 static void kick_cpus_irq_workfn(struct irq_work *irq_work) 5480 { 5481 struct rq *this_rq = this_rq(); 5482 struct scx_rq *this_scx = &this_rq->scx; 5483 struct scx_kick_syncs __rcu *ksyncs_pcpu = __this_cpu_read(scx_kick_syncs); 5484 bool should_wait = false; 5485 unsigned long *ksyncs; 5486 s32 cpu; 5487 5488 if (unlikely(!ksyncs_pcpu)) { 5489 pr_warn_once("kick_cpus_irq_workfn() called with NULL scx_kick_syncs"); 5490 return; 5491 } 5492 5493 ksyncs = rcu_dereference_bh(ksyncs_pcpu)->syncs; 5494 5495 for_each_cpu(cpu, this_scx->cpus_to_kick) { 5496 should_wait |= kick_one_cpu(cpu, this_rq, ksyncs); 5497 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick); 5498 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle); 5499 } 5500 5501 for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) { 5502 kick_one_cpu_if_idle(cpu, this_rq); 5503 cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle); 5504 } 5505 5506 if (!should_wait) 5507 return; 5508 5509 for_each_cpu(cpu, this_scx->cpus_to_wait) { 5510 unsigned long *wait_kick_sync = &cpu_rq(cpu)->scx.kick_sync; 5511 5512 /* 5513 * Busy-wait until the task running at the time of kicking is no 5514 * longer running. This can be used to implement e.g. core 5515 * scheduling. 5516 * 5517 * smp_cond_load_acquire() pairs with store_releases in 5518 * pick_task_scx() and put_prev_task_scx(). The former breaks 5519 * the wait if SCX's scheduling path is entered even if the same 5520 * task is picked subsequently. The latter is necessary to break 5521 * the wait when $cpu is taken by a higher sched class. 5522 */ 5523 if (cpu != cpu_of(this_rq)) 5524 smp_cond_load_acquire(wait_kick_sync, VAL != ksyncs[cpu]); 5525 5526 cpumask_clear_cpu(cpu, this_scx->cpus_to_wait); 5527 } 5528 } 5529 5530 /** 5531 * print_scx_info - print out sched_ext scheduler state 5532 * @log_lvl: the log level to use when printing 5533 * @p: target task 5534 * 5535 * If a sched_ext scheduler is enabled, print the name and state of the 5536 * scheduler. If @p is on sched_ext, print further information about the task. 5537 * 5538 * This function can be safely called on any task as long as the task_struct 5539 * itself is accessible. While safe, this function isn't synchronized and may 5540 * print out mixups or garbages of limited length. 
5541 */ 5542 void print_scx_info(const char *log_lvl, struct task_struct *p) 5543 { 5544 struct scx_sched *sch = scx_root; 5545 enum scx_enable_state state = scx_enable_state(); 5546 const char *all = READ_ONCE(scx_switching_all) ? "+all" : ""; 5547 char runnable_at_buf[22] = "?"; 5548 struct sched_class *class; 5549 unsigned long runnable_at; 5550 5551 if (state == SCX_DISABLED) 5552 return; 5553 5554 /* 5555 * Carefully check if the task was running on sched_ext, and then 5556 * carefully copy the time it's been runnable, and its state. 5557 */ 5558 if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) || 5559 class != &ext_sched_class) { 5560 printk("%sSched_ext: %s (%s%s)", log_lvl, sch->ops.name, 5561 scx_enable_state_str[state], all); 5562 return; 5563 } 5564 5565 if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at, 5566 sizeof(runnable_at))) 5567 scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms", 5568 jiffies_delta_msecs(runnable_at, jiffies)); 5569 5570 /* print everything onto one line to conserve console space */ 5571 printk("%sSched_ext: %s (%s%s), task: runnable_at=%s", 5572 log_lvl, sch->ops.name, scx_enable_state_str[state], all, 5573 runnable_at_buf); 5574 } 5575 5576 static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr) 5577 { 5578 /* 5579 * SCX schedulers often have userspace components which are sometimes 5580 * involved in critial scheduling paths. PM operations involve freezing 5581 * userspace which can lead to scheduling misbehaviors including stalls. 5582 * Let's bypass while PM operations are in progress. 5583 */ 5584 switch (event) { 5585 case PM_HIBERNATION_PREPARE: 5586 case PM_SUSPEND_PREPARE: 5587 case PM_RESTORE_PREPARE: 5588 scx_bypass(true); 5589 break; 5590 case PM_POST_HIBERNATION: 5591 case PM_POST_SUSPEND: 5592 case PM_POST_RESTORE: 5593 scx_bypass(false); 5594 break; 5595 } 5596 5597 return NOTIFY_OK; 5598 } 5599 5600 static struct notifier_block scx_pm_notifier = { 5601 .notifier_call = scx_pm_handler, 5602 }; 5603 5604 void __init init_sched_ext_class(void) 5605 { 5606 s32 cpu, v; 5607 5608 /* 5609 * The following is to prevent the compiler from optimizing out the enum 5610 * definitions so that BPF scheduler implementations can use them 5611 * through the generated vmlinux.h. 
5612 */ 5613 WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT | 5614 SCX_TG_ONLINE); 5615 5616 scx_idle_init_masks(); 5617 5618 for_each_possible_cpu(cpu) { 5619 struct rq *rq = cpu_rq(cpu); 5620 int n = cpu_to_node(cpu); 5621 5622 init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL); 5623 init_dsq(&rq->scx.bypass_dsq, SCX_DSQ_BYPASS); 5624 INIT_LIST_HEAD(&rq->scx.runnable_list); 5625 INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals); 5626 5627 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick, GFP_KERNEL, n)); 5628 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n)); 5629 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n)); 5630 BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n)); 5631 rq->scx.deferred_irq_work = IRQ_WORK_INIT_HARD(deferred_irq_workfn); 5632 rq->scx.kick_cpus_irq_work = IRQ_WORK_INIT_HARD(kick_cpus_irq_workfn); 5633 5634 if (cpu_online(cpu)) 5635 cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE; 5636 } 5637 5638 register_sysrq_key('S', &sysrq_sched_ext_reset_op); 5639 register_sysrq_key('D', &sysrq_sched_ext_dump_op); 5640 INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn); 5641 } 5642 5643 5644 /******************************************************************************** 5645 * Helpers that can be called from the BPF scheduler. 5646 */ 5647 static bool scx_dsq_insert_preamble(struct scx_sched *sch, struct task_struct *p, 5648 u64 enq_flags) 5649 { 5650 if (!scx_kf_allowed(sch, SCX_KF_ENQUEUE | SCX_KF_DISPATCH)) 5651 return false; 5652 5653 lockdep_assert_irqs_disabled(); 5654 5655 if (unlikely(!p)) { 5656 scx_error(sch, "called with NULL task"); 5657 return false; 5658 } 5659 5660 if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) { 5661 scx_error(sch, "invalid enq_flags 0x%llx", enq_flags); 5662 return false; 5663 } 5664 5665 return true; 5666 } 5667 5668 static void scx_dsq_insert_commit(struct scx_sched *sch, struct task_struct *p, 5669 u64 dsq_id, u64 enq_flags) 5670 { 5671 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 5672 struct task_struct *ddsp_task; 5673 5674 ddsp_task = __this_cpu_read(direct_dispatch_task); 5675 if (ddsp_task) { 5676 mark_direct_dispatch(sch, ddsp_task, p, dsq_id, enq_flags); 5677 return; 5678 } 5679 5680 if (unlikely(dspc->cursor >= scx_dsp_max_batch)) { 5681 scx_error(sch, "dispatch buffer overflow"); 5682 return; 5683 } 5684 5685 dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){ 5686 .task = p, 5687 .qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK, 5688 .dsq_id = dsq_id, 5689 .enq_flags = enq_flags, 5690 }; 5691 } 5692 5693 __bpf_kfunc_start_defs(); 5694 5695 /** 5696 * scx_bpf_dsq_insert - Insert a task into the FIFO queue of a DSQ 5697 * @p: task_struct to insert 5698 * @dsq_id: DSQ to insert into 5699 * @slice: duration @p can run for in nsecs, 0 to keep the current value 5700 * @enq_flags: SCX_ENQ_* 5701 * 5702 * Insert @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe to 5703 * call this function spuriously. Can be called from ops.enqueue(), 5704 * ops.select_cpu(), and ops.dispatch(). 5705 * 5706 * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch 5707 * and @p must match the task being enqueued. 5708 * 5709 * When called from ops.select_cpu(), @enq_flags and @dsp_id are stored, and @p 5710 * will be directly inserted into the corresponding dispatch queue after 5711 * ops.select_cpu() returns. 
If @p is inserted into SCX_DSQ_LOCAL, it will be 5712 * inserted into the local DSQ of the CPU returned by ops.select_cpu(). 5713 * @enq_flags are OR'd with the enqueue flags on the enqueue path before the 5714 * task is inserted. 5715 * 5716 * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id 5717 * and this function can be called upto ops.dispatch_max_batch times to insert 5718 * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of the 5719 * remaining slots. scx_bpf_dsq_move_to_local() flushes the batch and resets the 5720 * counter. 5721 * 5722 * This function doesn't have any locking restrictions and may be called under 5723 * BPF locks (in the future when BPF introduces more flexible locking). 5724 * 5725 * @p is allowed to run for @slice. The scheduling path is triggered on slice 5726 * exhaustion. If zero, the current residual slice is maintained. If 5727 * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with 5728 * scx_bpf_kick_cpu() to trigger scheduling. 5729 * 5730 * Returns %true on successful insertion, %false on failure. On the root 5731 * scheduler, %false return triggers scheduler abort and the caller doesn't need 5732 * to check the return value. 5733 */ 5734 __bpf_kfunc bool scx_bpf_dsq_insert___v2(struct task_struct *p, u64 dsq_id, 5735 u64 slice, u64 enq_flags) 5736 { 5737 struct scx_sched *sch; 5738 5739 guard(rcu)(); 5740 sch = rcu_dereference(scx_root); 5741 if (unlikely(!sch)) 5742 return false; 5743 5744 if (!scx_dsq_insert_preamble(sch, p, enq_flags)) 5745 return false; 5746 5747 if (slice) 5748 p->scx.slice = slice; 5749 else 5750 p->scx.slice = p->scx.slice ?: 1; 5751 5752 scx_dsq_insert_commit(sch, p, dsq_id, enq_flags); 5753 5754 return true; 5755 } 5756 5757 /* 5758 * COMPAT: Will be removed in v6.23 along with the ___v2 suffix. 5759 */ 5760 __bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, 5761 u64 slice, u64 enq_flags) 5762 { 5763 scx_bpf_dsq_insert___v2(p, dsq_id, slice, enq_flags); 5764 } 5765 5766 static bool scx_dsq_insert_vtime(struct scx_sched *sch, struct task_struct *p, 5767 u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) 5768 { 5769 if (!scx_dsq_insert_preamble(sch, p, enq_flags)) 5770 return false; 5771 5772 if (slice) 5773 p->scx.slice = slice; 5774 else 5775 p->scx.slice = p->scx.slice ?: 1; 5776 5777 p->scx.dsq_vtime = vtime; 5778 5779 scx_dsq_insert_commit(sch, p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); 5780 5781 return true; 5782 } 5783 5784 struct scx_bpf_dsq_insert_vtime_args { 5785 /* @p can't be packed together as KF_RCU is not transitive */ 5786 u64 dsq_id; 5787 u64 slice; 5788 u64 vtime; 5789 u64 enq_flags; 5790 }; 5791 5792 /** 5793 * __scx_bpf_dsq_insert_vtime - Arg-wrapped vtime DSQ insertion 5794 * @p: task_struct to insert 5795 * @args: struct containing the rest of the arguments 5796 * @args->dsq_id: DSQ to insert into 5797 * @args->slice: duration @p can run for in nsecs, 0 to keep the current value 5798 * @args->vtime: @p's ordering inside the vtime-sorted queue of the target DSQ 5799 * @args->enq_flags: SCX_ENQ_* 5800 * 5801 * Wrapper kfunc that takes arguments via struct to work around BPF's 5 argument 5802 * limit. BPF programs should use scx_bpf_dsq_insert_vtime() which is provided 5803 * as an inline wrapper in common.bpf.h. 5804 * 5805 * Insert @p into the vtime priority queue of the DSQ identified by 5806 * @args->dsq_id. Tasks queued into the priority queue are ordered by 5807 * @args->vtime. 
All other aspects are identical to scx_bpf_dsq_insert(). 5808 * 5809 * @args->vtime ordering is according to time_before64() which considers 5810 * wrapping. A numerically larger vtime may indicate an earlier position in the 5811 * ordering and vice-versa. 5812 * 5813 * A DSQ can only be used as a FIFO or priority queue at any given time and this 5814 * function must not be called on a DSQ which already has one or more FIFO tasks 5815 * queued and vice-versa. Also, the built-in DSQs (SCX_DSQ_LOCAL and 5816 * SCX_DSQ_GLOBAL) cannot be used as priority queues. 5817 * 5818 * Returns %true on successful insertion, %false on failure. On the root 5819 * scheduler, %false return triggers scheduler abort and the caller doesn't need 5820 * to check the return value. 5821 */ 5822 __bpf_kfunc bool 5823 __scx_bpf_dsq_insert_vtime(struct task_struct *p, 5824 struct scx_bpf_dsq_insert_vtime_args *args) 5825 { 5826 struct scx_sched *sch; 5827 5828 guard(rcu)(); 5829 5830 sch = rcu_dereference(scx_root); 5831 if (unlikely(!sch)) 5832 return false; 5833 5834 return scx_dsq_insert_vtime(sch, p, args->dsq_id, args->slice, 5835 args->vtime, args->enq_flags); 5836 } 5837 5838 /* 5839 * COMPAT: Will be removed in v6.23. 5840 */ 5841 __bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, 5842 u64 slice, u64 vtime, u64 enq_flags) 5843 { 5844 struct scx_sched *sch; 5845 5846 guard(rcu)(); 5847 5848 sch = rcu_dereference(scx_root); 5849 if (unlikely(!sch)) 5850 return; 5851 5852 scx_dsq_insert_vtime(sch, p, dsq_id, slice, vtime, enq_flags); 5853 } 5854 5855 __bpf_kfunc_end_defs(); 5856 5857 BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch) 5858 BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU) 5859 BTF_ID_FLAGS(func, scx_bpf_dsq_insert___v2, KF_RCU) 5860 BTF_ID_FLAGS(func, __scx_bpf_dsq_insert_vtime, KF_RCU) 5861 BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU) 5862 BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch) 5863 5864 static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = { 5865 .owner = THIS_MODULE, 5866 .set = &scx_kfunc_ids_enqueue_dispatch, 5867 }; 5868 5869 static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit, 5870 struct task_struct *p, u64 dsq_id, u64 enq_flags) 5871 { 5872 struct scx_sched *sch = scx_root; 5873 struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq; 5874 struct rq *this_rq, *src_rq, *locked_rq; 5875 bool dispatched = false; 5876 bool in_balance; 5877 unsigned long flags; 5878 5879 if (!scx_kf_allowed_if_unlocked() && 5880 !scx_kf_allowed(sch, SCX_KF_DISPATCH)) 5881 return false; 5882 5883 /* 5884 * If the BPF scheduler keeps calling this function repeatedly, it can 5885 * cause similar live-lock conditions as consume_dispatch_q(). 5886 */ 5887 if (unlikely(READ_ONCE(scx_aborting))) 5888 return false; 5889 5890 /* 5891 * Can be called from either ops.dispatch() locking this_rq() or any 5892 * context where no rq lock is held. If latter, lock @p's task_rq which 5893 * we'll likely need anyway. 5894 */ 5895 src_rq = task_rq(p); 5896 5897 local_irq_save(flags); 5898 this_rq = this_rq(); 5899 in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE; 5900 5901 if (in_balance) { 5902 if (this_rq != src_rq) { 5903 raw_spin_rq_unlock(this_rq); 5904 raw_spin_rq_lock(src_rq); 5905 } 5906 } else { 5907 raw_spin_rq_lock(src_rq); 5908 } 5909 5910 locked_rq = src_rq; 5911 raw_spin_lock(&src_dsq->lock); 5912 5913 /* 5914 * Did someone else get to it? @p could have already left $src_dsq, got 5915 * re-enqueud, or be in the process of being consumed by someone else. 
5916 */ 5917 if (unlikely(p->scx.dsq != src_dsq || 5918 u32_before(kit->cursor.priv, p->scx.dsq_seq) || 5919 p->scx.holding_cpu >= 0) || 5920 WARN_ON_ONCE(src_rq != task_rq(p))) { 5921 raw_spin_unlock(&src_dsq->lock); 5922 goto out; 5923 } 5924 5925 /* @p is still on $src_dsq and stable, determine the destination */ 5926 dst_dsq = find_dsq_for_dispatch(sch, this_rq, dsq_id, p); 5927 5928 /* 5929 * Apply vtime and slice updates before moving so that the new time is 5930 * visible before inserting into $dst_dsq. @p is still on $src_dsq but 5931 * this is safe as we're locking it. 5932 */ 5933 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME) 5934 p->scx.dsq_vtime = kit->vtime; 5935 if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE) 5936 p->scx.slice = kit->slice; 5937 5938 /* execute move */ 5939 locked_rq = move_task_between_dsqs(sch, p, enq_flags, src_dsq, dst_dsq); 5940 dispatched = true; 5941 out: 5942 if (in_balance) { 5943 if (this_rq != locked_rq) { 5944 raw_spin_rq_unlock(locked_rq); 5945 raw_spin_rq_lock(this_rq); 5946 } 5947 } else { 5948 raw_spin_rq_unlock_irqrestore(locked_rq, flags); 5949 } 5950 5951 kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE | 5952 __SCX_DSQ_ITER_HAS_VTIME); 5953 return dispatched; 5954 } 5955 5956 __bpf_kfunc_start_defs(); 5957 5958 /** 5959 * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots 5960 * 5961 * Can only be called from ops.dispatch(). 5962 */ 5963 __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void) 5964 { 5965 struct scx_sched *sch; 5966 5967 guard(rcu)(); 5968 5969 sch = rcu_dereference(scx_root); 5970 if (unlikely(!sch)) 5971 return 0; 5972 5973 if (!scx_kf_allowed(sch, SCX_KF_DISPATCH)) 5974 return 0; 5975 5976 return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor); 5977 } 5978 5979 /** 5980 * scx_bpf_dispatch_cancel - Cancel the latest dispatch 5981 * 5982 * Cancel the latest dispatch. Can be called multiple times to cancel further 5983 * dispatches. Can only be called from ops.dispatch(). 5984 */ 5985 __bpf_kfunc void scx_bpf_dispatch_cancel(void) 5986 { 5987 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 5988 struct scx_sched *sch; 5989 5990 guard(rcu)(); 5991 5992 sch = rcu_dereference(scx_root); 5993 if (unlikely(!sch)) 5994 return; 5995 5996 if (!scx_kf_allowed(sch, SCX_KF_DISPATCH)) 5997 return; 5998 5999 if (dspc->cursor > 0) 6000 dspc->cursor--; 6001 else 6002 scx_error(sch, "dispatch buffer underflow"); 6003 } 6004 6005 /** 6006 * scx_bpf_dsq_move_to_local - move a task from a DSQ to the current CPU's local DSQ 6007 * @dsq_id: DSQ to move task from 6008 * 6009 * Move a task from the non-local DSQ identified by @dsq_id to the current CPU's 6010 * local DSQ for execution. Can only be called from ops.dispatch(). 6011 * 6012 * This function flushes the in-flight dispatches from scx_bpf_dsq_insert() 6013 * before trying to move from the specified DSQ. It may also grab rq locks and 6014 * thus can't be called under any BPF locks. 6015 * 6016 * Returns %true if a task has been moved, %false if there isn't any task to 6017 * move. 
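 *
 * As an illustrative sketch only (not part of this file), a minimal BPF
 * ops.dispatch() built around this kfunc could look like the following, where
 * SHARED_DSQ is an assumed scheduler-defined custom DSQ ID and
 * BPF_STRUCT_OPS() is the convenience wrapper provided by the scx tooling
 * headers:
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		scx_bpf_dsq_move_to_local(SHARED_DSQ);
 *	}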
6018 */ 6019 __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id) 6020 { 6021 struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx); 6022 struct scx_dispatch_q *dsq; 6023 struct scx_sched *sch; 6024 6025 guard(rcu)(); 6026 6027 sch = rcu_dereference(scx_root); 6028 if (unlikely(!sch)) 6029 return false; 6030 6031 if (!scx_kf_allowed(sch, SCX_KF_DISPATCH)) 6032 return false; 6033 6034 flush_dispatch_buf(sch, dspc->rq); 6035 6036 dsq = find_user_dsq(sch, dsq_id); 6037 if (unlikely(!dsq)) { 6038 scx_error(sch, "invalid DSQ ID 0x%016llx", dsq_id); 6039 return false; 6040 } 6041 6042 if (consume_dispatch_q(sch, dspc->rq, dsq)) { 6043 /* 6044 * A successfully consumed task can be dequeued before it starts 6045 * running while the CPU is trying to migrate other dispatched 6046 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty 6047 * local DSQ. 6048 */ 6049 dspc->nr_tasks++; 6050 return true; 6051 } else { 6052 return false; 6053 } 6054 } 6055 6056 /** 6057 * scx_bpf_dsq_move_set_slice - Override slice when moving between DSQs 6058 * @it__iter: DSQ iterator in progress 6059 * @slice: duration the moved task can run for in nsecs 6060 * 6061 * Override the slice of the next task that will be moved from @it__iter using 6062 * scx_bpf_dsq_move[_vtime](). If this function is not called, the previous 6063 * slice duration is kept. 6064 */ 6065 __bpf_kfunc void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter, 6066 u64 slice) 6067 { 6068 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter; 6069 6070 kit->slice = slice; 6071 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE; 6072 } 6073 6074 /** 6075 * scx_bpf_dsq_move_set_vtime - Override vtime when moving between DSQs 6076 * @it__iter: DSQ iterator in progress 6077 * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ 6078 * 6079 * Override the vtime of the next task that will be moved from @it__iter using 6080 * scx_bpf_dsq_move_vtime(). If this function is not called, the previous slice 6081 * vtime is kept. If scx_bpf_dsq_move() is used to dispatch the next task, the 6082 * override is ignored and cleared. 6083 */ 6084 __bpf_kfunc void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter, 6085 u64 vtime) 6086 { 6087 struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter; 6088 6089 kit->vtime = vtime; 6090 kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME; 6091 } 6092 6093 /** 6094 * scx_bpf_dsq_move - Move a task from DSQ iteration to a DSQ 6095 * @it__iter: DSQ iterator in progress 6096 * @p: task to transfer 6097 * @dsq_id: DSQ to move @p to 6098 * @enq_flags: SCX_ENQ_* 6099 * 6100 * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ 6101 * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can 6102 * be the destination. 6103 * 6104 * For the transfer to be successful, @p must still be on the DSQ and have been 6105 * queued before the DSQ iteration started. This function doesn't care whether 6106 * @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have 6107 * been queued before the iteration started. 6108 * 6109 * @p's slice is kept by default. Use scx_bpf_dsq_move_set_slice() to update. 6110 * 6111 * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq 6112 * lock (e.g. BPF timers or SYSCALL programs). 6113 * 6114 * Returns %true if @p has been consumed, %false if @p had already been 6115 * consumed, dequeued, or, for sub-scheds, @dsq_id points to a disallowed local 6116 * DSQ. 
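 *
 * An illustrative sketch (not part of this file): pull a particular task out
 * of an assumed scheduler-defined DSQ and onto the local DSQ from within
 * libbpf's bpf_for_each() iteration. MY_DSQ and target_pid are assumptions,
 * and BPF_FOR_EACH_ITER is the iterator-pointer helper from the scx tooling
 * headers:
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, MY_DSQ, 0) {
 *		if (p->pid == target_pid) {
 *			scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p, SCX_DSQ_LOCAL, 0);
 *			break;
 *		}
 *	}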
6117 */ 6118 __bpf_kfunc bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter, 6119 struct task_struct *p, u64 dsq_id, 6120 u64 enq_flags) 6121 { 6122 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter, 6123 p, dsq_id, enq_flags); 6124 } 6125 6126 /** 6127 * scx_bpf_dsq_move_vtime - Move a task from DSQ iteration to a PRIQ DSQ 6128 * @it__iter: DSQ iterator in progress 6129 * @p: task to transfer 6130 * @dsq_id: DSQ to move @p to 6131 * @enq_flags: SCX_ENQ_* 6132 * 6133 * Transfer @p which is on the DSQ currently iterated by @it__iter to the 6134 * priority queue of the DSQ specified by @dsq_id. The destination must be a 6135 * user DSQ as only user DSQs support priority queue. 6136 * 6137 * @p's slice and vtime are kept by default. Use scx_bpf_dsq_move_set_slice() 6138 * and scx_bpf_dsq_move_set_vtime() to update. 6139 * 6140 * All other aspects are identical to scx_bpf_dsq_move(). See 6141 * scx_bpf_dsq_insert_vtime() for more information on @vtime. 6142 */ 6143 __bpf_kfunc bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter, 6144 struct task_struct *p, u64 dsq_id, 6145 u64 enq_flags) 6146 { 6147 return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter, 6148 p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); 6149 } 6150 6151 __bpf_kfunc_end_defs(); 6152 6153 BTF_KFUNCS_START(scx_kfunc_ids_dispatch) 6154 BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots) 6155 BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel) 6156 BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local) 6157 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU) 6158 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU) 6159 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU) 6160 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU) 6161 BTF_KFUNCS_END(scx_kfunc_ids_dispatch) 6162 6163 static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = { 6164 .owner = THIS_MODULE, 6165 .set = &scx_kfunc_ids_dispatch, 6166 }; 6167 6168 static u32 reenq_local(struct rq *rq) 6169 { 6170 LIST_HEAD(tasks); 6171 u32 nr_enqueued = 0; 6172 struct task_struct *p, *n; 6173 6174 lockdep_assert_rq_held(rq); 6175 6176 /* 6177 * The BPF scheduler may choose to dispatch tasks back to 6178 * @rq->scx.local_dsq. Move all candidate tasks off to a private list 6179 * first to avoid processing the same tasks repeatedly. 6180 */ 6181 list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list, 6182 scx.dsq_list.node) { 6183 /* 6184 * If @p is being migrated, @p's current CPU may not agree with 6185 * its allowed CPUs and the migration_cpu_stop is about to 6186 * deactivate and re-activate @p anyway. Skip re-enqueueing. 6187 * 6188 * While racing sched property changes may also dequeue and 6189 * re-enqueue a migrating task while its current CPU and allowed 6190 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to 6191 * the current local DSQ for running tasks and thus are not 6192 * visible to the BPF scheduler. 6193 */ 6194 if (p->migration_pending) 6195 continue; 6196 6197 dispatch_dequeue(rq, p); 6198 list_add_tail(&p->scx.dsq_list.node, &tasks); 6199 } 6200 6201 list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) { 6202 list_del_init(&p->scx.dsq_list.node); 6203 do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1); 6204 nr_enqueued++; 6205 } 6206 6207 return nr_enqueued; 6208 } 6209 6210 __bpf_kfunc_start_defs(); 6211 6212 /** 6213 * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ 6214 * 6215 * Iterate over all of the tasks currently enqueued on the local DSQ of the 6216 * caller's CPU, and re-enqueue them in the BPF scheduler. 
Returns the number of 6217 * processed tasks. Can only be called from ops.cpu_release(). 6218 * 6219 * COMPAT: Will be removed in v6.23 along with the ___v2 suffix on the void 6220 * returning variant that can be called from anywhere. 6221 */ 6222 __bpf_kfunc u32 scx_bpf_reenqueue_local(void) 6223 { 6224 struct scx_sched *sch; 6225 struct rq *rq; 6226 6227 guard(rcu)(); 6228 sch = rcu_dereference(scx_root); 6229 if (unlikely(!sch)) 6230 return 0; 6231 6232 if (!scx_kf_allowed(sch, SCX_KF_CPU_RELEASE)) 6233 return 0; 6234 6235 rq = cpu_rq(smp_processor_id()); 6236 lockdep_assert_rq_held(rq); 6237 6238 return reenq_local(rq); 6239 } 6240 6241 __bpf_kfunc_end_defs(); 6242 6243 BTF_KFUNCS_START(scx_kfunc_ids_cpu_release) 6244 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local) 6245 BTF_KFUNCS_END(scx_kfunc_ids_cpu_release) 6246 6247 static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = { 6248 .owner = THIS_MODULE, 6249 .set = &scx_kfunc_ids_cpu_release, 6250 }; 6251 6252 __bpf_kfunc_start_defs(); 6253 6254 /** 6255 * scx_bpf_create_dsq - Create a custom DSQ 6256 * @dsq_id: DSQ to create 6257 * @node: NUMA node to allocate from 6258 * 6259 * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable 6260 * scx callback, and any BPF_PROG_TYPE_SYSCALL prog. 6261 */ 6262 __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) 6263 { 6264 struct scx_dispatch_q *dsq; 6265 struct scx_sched *sch; 6266 s32 ret; 6267 6268 if (unlikely(node >= (int)nr_node_ids || 6269 (node < 0 && node != NUMA_NO_NODE))) 6270 return -EINVAL; 6271 6272 if (unlikely(dsq_id & SCX_DSQ_FLAG_BUILTIN)) 6273 return -EINVAL; 6274 6275 dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node); 6276 if (!dsq) 6277 return -ENOMEM; 6278 6279 init_dsq(dsq, dsq_id); 6280 6281 rcu_read_lock(); 6282 6283 sch = rcu_dereference(scx_root); 6284 if (sch) 6285 ret = rhashtable_lookup_insert_fast(&sch->dsq_hash, &dsq->hash_node, 6286 dsq_hash_params); 6287 else 6288 ret = -ENODEV; 6289 6290 rcu_read_unlock(); 6291 if (ret) 6292 kfree(dsq); 6293 return ret; 6294 } 6295 6296 __bpf_kfunc_end_defs(); 6297 6298 BTF_KFUNCS_START(scx_kfunc_ids_unlocked) 6299 BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE) 6300 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU) 6301 BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU) 6302 BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU) 6303 BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU) 6304 BTF_KFUNCS_END(scx_kfunc_ids_unlocked) 6305 6306 static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = { 6307 .owner = THIS_MODULE, 6308 .set = &scx_kfunc_ids_unlocked, 6309 }; 6310 6311 __bpf_kfunc_start_defs(); 6312 6313 /** 6314 * scx_bpf_task_set_slice - Set task's time slice 6315 * @p: task of interest 6316 * @slice: time slice to set in nsecs 6317 * 6318 * Set @p's time slice to @slice. Returns %true on success, %false if the 6319 * calling scheduler doesn't have authority over @p. 6320 */ 6321 __bpf_kfunc bool scx_bpf_task_set_slice(struct task_struct *p, u64 slice) 6322 { 6323 p->scx.slice = slice; 6324 return true; 6325 } 6326 6327 /** 6328 * scx_bpf_task_set_dsq_vtime - Set task's virtual time for DSQ ordering 6329 * @p: task of interest 6330 * @vtime: virtual time to set 6331 * 6332 * Set @p's virtual time to @vtime. Returns %true on success, %false if the 6333 * calling scheduler doesn't have authority over @p. 
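 *
 * A minimal illustrative sketch (not part of this file): from a context that
 * is allowed to call this kfunc, line @p up with a scheduler-maintained
 * global virtual clock before it is queued with scx_bpf_dsq_insert_vtime().
 * vtime_now is an assumption, not a symbol defined in this file:
 *
 *	scx_bpf_task_set_dsq_vtime(p, vtime_now);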
6334 */ 6335 __bpf_kfunc bool scx_bpf_task_set_dsq_vtime(struct task_struct *p, u64 vtime) 6336 { 6337 p->scx.dsq_vtime = vtime; 6338 return true; 6339 } 6340 6341 static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags) 6342 { 6343 struct rq *this_rq; 6344 unsigned long irq_flags; 6345 6346 if (!ops_cpu_valid(sch, cpu, NULL)) 6347 return; 6348 6349 local_irq_save(irq_flags); 6350 6351 this_rq = this_rq(); 6352 6353 /* 6354 * While bypassing for PM ops, IRQ handling may not be online which can 6355 * lead to irq_work_queue() malfunction such as infinite busy wait for 6356 * IRQ status update. Suppress kicking. 6357 */ 6358 if (scx_rq_bypassing(this_rq)) 6359 goto out; 6360 6361 /* 6362 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting 6363 * rq locks. We can probably be smarter and avoid bouncing if called 6364 * from ops which don't hold a rq lock. 6365 */ 6366 if (flags & SCX_KICK_IDLE) { 6367 struct rq *target_rq = cpu_rq(cpu); 6368 6369 if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT))) 6370 scx_error(sch, "PREEMPT/WAIT cannot be used with SCX_KICK_IDLE"); 6371 6372 if (raw_spin_rq_trylock(target_rq)) { 6373 if (can_skip_idle_kick(target_rq)) { 6374 raw_spin_rq_unlock(target_rq); 6375 goto out; 6376 } 6377 raw_spin_rq_unlock(target_rq); 6378 } 6379 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle); 6380 } else { 6381 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick); 6382 6383 if (flags & SCX_KICK_PREEMPT) 6384 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt); 6385 if (flags & SCX_KICK_WAIT) 6386 cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait); 6387 } 6388 6389 irq_work_queue(&this_rq->scx.kick_cpus_irq_work); 6390 out: 6391 local_irq_restore(irq_flags); 6392 } 6393 6394 /** 6395 * scx_bpf_kick_cpu - Trigger reschedule on a CPU 6396 * @cpu: cpu to kick 6397 * @flags: %SCX_KICK_* flags 6398 * 6399 * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or 6400 * trigger rescheduling on a busy CPU. This can be called from any online 6401 * scx_ops operation and the actual kicking is performed asynchronously through 6402 * an irq work. 6403 */ 6404 __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags) 6405 { 6406 struct scx_sched *sch; 6407 6408 guard(rcu)(); 6409 sch = rcu_dereference(scx_root); 6410 if (likely(sch)) 6411 scx_kick_cpu(sch, cpu, flags); 6412 } 6413 6414 /** 6415 * scx_bpf_dsq_nr_queued - Return the number of queued tasks 6416 * @dsq_id: id of the DSQ 6417 * 6418 * Return the number of tasks in the DSQ matching @dsq_id. If not found, 6419 * -%ENOENT is returned. 
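 *
 * An illustrative sketch (not part of this file): wake an idle CPU when an
 * assumed scheduler-defined shared DSQ has built up a backlog. SHARED_DSQ,
 * the threshold, and @cpu (a CPU the scheduler picked earlier) are all
 * assumptions:
 *
 *	if (scx_bpf_dsq_nr_queued(SHARED_DSQ) > 4)
 *		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);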
6420 */ 6421 __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id) 6422 { 6423 struct scx_sched *sch; 6424 struct scx_dispatch_q *dsq; 6425 s32 ret; 6426 6427 preempt_disable(); 6428 6429 sch = rcu_dereference_sched(scx_root); 6430 if (unlikely(!sch)) { 6431 ret = -ENODEV; 6432 goto out; 6433 } 6434 6435 if (dsq_id == SCX_DSQ_LOCAL) { 6436 ret = READ_ONCE(this_rq()->scx.local_dsq.nr); 6437 goto out; 6438 } else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) { 6439 s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK; 6440 6441 if (ops_cpu_valid(sch, cpu, NULL)) { 6442 ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr); 6443 goto out; 6444 } 6445 } else { 6446 dsq = find_user_dsq(sch, dsq_id); 6447 if (dsq) { 6448 ret = READ_ONCE(dsq->nr); 6449 goto out; 6450 } 6451 } 6452 ret = -ENOENT; 6453 out: 6454 preempt_enable(); 6455 return ret; 6456 } 6457 6458 /** 6459 * scx_bpf_destroy_dsq - Destroy a custom DSQ 6460 * @dsq_id: DSQ to destroy 6461 * 6462 * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with 6463 * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is 6464 * empty and no further tasks are dispatched to it. Ignored if called on a DSQ 6465 * which doesn't exist. Can be called from any online scx_ops operations. 6466 */ 6467 __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id) 6468 { 6469 struct scx_sched *sch; 6470 6471 rcu_read_lock(); 6472 sch = rcu_dereference(scx_root); 6473 if (sch) 6474 destroy_dsq(sch, dsq_id); 6475 rcu_read_unlock(); 6476 } 6477 6478 /** 6479 * bpf_iter_scx_dsq_new - Create a DSQ iterator 6480 * @it: iterator to initialize 6481 * @dsq_id: DSQ to iterate 6482 * @flags: %SCX_DSQ_ITER_* 6483 * 6484 * Initialize BPF iterator @it which can be used with bpf_for_each() to walk 6485 * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes 6486 * tasks which are already queued when this function is invoked. 6487 */ 6488 __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, 6489 u64 flags) 6490 { 6491 struct bpf_iter_scx_dsq_kern *kit = (void *)it; 6492 struct scx_sched *sch; 6493 6494 BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) > 6495 sizeof(struct bpf_iter_scx_dsq)); 6496 BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) != 6497 __alignof__(struct bpf_iter_scx_dsq)); 6498 BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS & 6499 ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1)); 6500 6501 /* 6502 * next() and destroy() will be called regardless of the return value. 6503 * Always clear $kit->dsq. 6504 */ 6505 kit->dsq = NULL; 6506 6507 sch = rcu_dereference_check(scx_root, rcu_read_lock_bh_held()); 6508 if (unlikely(!sch)) 6509 return -ENODEV; 6510 6511 if (flags & ~__SCX_DSQ_ITER_USER_FLAGS) 6512 return -EINVAL; 6513 6514 kit->dsq = find_user_dsq(sch, dsq_id); 6515 if (!kit->dsq) 6516 return -ENOENT; 6517 6518 kit->cursor = INIT_DSQ_LIST_CURSOR(kit->cursor, flags, 6519 READ_ONCE(kit->dsq->seq)); 6520 6521 return 0; 6522 } 6523 6524 /** 6525 * bpf_iter_scx_dsq_next - Progress a DSQ iterator 6526 * @it: iterator to progress 6527 * 6528 * Return the next task. See bpf_iter_scx_dsq_new(). 
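 *
 * BPF schedulers normally do not call the iterator kfuncs directly but use
 * libbpf's bpf_for_each() open-coded iterator macro, which wraps
 * bpf_iter_scx_dsq_new()/next()/destroy(). An illustrative sketch, with
 * MY_DSQ being an assumed scheduler-defined DSQ ID:
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, MY_DSQ, 0)
 *		bpf_printk("queued: %s[%d]", p->comm, p->pid);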
6529  */
6530 __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
6531 {
6532 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6533 	bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV;
6534 	struct task_struct *p;
6535 	unsigned long flags;
6536 
6537 	if (!kit->dsq)
6538 		return NULL;
6539 
6540 	raw_spin_lock_irqsave(&kit->dsq->lock, flags);
6541 
6542 	if (list_empty(&kit->cursor.node))
6543 		p = NULL;
6544 	else
6545 		p = container_of(&kit->cursor, struct task_struct, scx.dsq_list);
6546 
6547 	/*
6548 	 * Only tasks which were queued before the iteration started are
6549 	 * visible. This bounds BPF iterations and guarantees that vtime never
6550 	 * jumps in the other direction while iterating.
6551 	 */
6552 	do {
6553 		p = nldsq_next_task(kit->dsq, p, rev);
6554 	} while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq)));
6555 
6556 	if (p) {
6557 		if (rev)
6558 			list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node);
6559 		else
6560 			list_move(&kit->cursor.node, &p->scx.dsq_list.node);
6561 	} else {
6562 		list_del_init(&kit->cursor.node);
6563 	}
6564 
6565 	raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
6566 
6567 	return p;
6568 }
6569 
6570 /**
6571  * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
6572  * @it: iterator to destroy
6573  *
6574  * Undo bpf_iter_scx_dsq_new().
6575  */
6576 __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
6577 {
6578 	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6579 
6580 	if (!kit->dsq)
6581 		return;
6582 
6583 	if (!list_empty(&kit->cursor.node)) {
6584 		unsigned long flags;
6585 
6586 		raw_spin_lock_irqsave(&kit->dsq->lock, flags);
6587 		list_del_init(&kit->cursor.node);
6588 		raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
6589 	}
6590 	kit->dsq = NULL;
6591 }
6592 
6593 /**
6594  * scx_bpf_dsq_peek - Lockless peek at the first element.
6595  * @dsq_id: DSQ to examine.
6596  *
6597  * Read the first element in the DSQ. This is semantically equivalent to using
6598  * the DSQ iterator, but is lock-free. Of course, like any lockless operation,
6599  * this provides only a point-in-time snapshot, and the contents may change
6600  * by the time any subsequent locking operation reads the queue.
6601  *
6602  * Returns a pointer to the first task, or NULL if the queue is empty or an
internal error occurred.
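 *
 * An illustrative sketch (not part of this file): use the snapshot to cheaply
 * check whether anything is waiting before doing more expensive work. MY_DSQ
 * is an assumed scheduler-defined DSQ ID:
 *
 *	struct task_struct *first = scx_bpf_dsq_peek(MY_DSQ);
 *
 *	if (!first)
 *		return;	/* nothing queued at this instant */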
6603 */ 6604 __bpf_kfunc struct task_struct *scx_bpf_dsq_peek(u64 dsq_id) 6605 { 6606 struct scx_sched *sch; 6607 struct scx_dispatch_q *dsq; 6608 6609 sch = rcu_dereference(scx_root); 6610 if (unlikely(!sch)) 6611 return NULL; 6612 6613 if (unlikely(dsq_id & SCX_DSQ_FLAG_BUILTIN)) { 6614 scx_error(sch, "peek disallowed on builtin DSQ 0x%llx", dsq_id); 6615 return NULL; 6616 } 6617 6618 dsq = find_user_dsq(sch, dsq_id); 6619 if (unlikely(!dsq)) { 6620 scx_error(sch, "peek on non-existent DSQ 0x%llx", dsq_id); 6621 return NULL; 6622 } 6623 6624 return rcu_dereference(dsq->first_task); 6625 } 6626 6627 __bpf_kfunc_end_defs(); 6628 6629 static s32 __bstr_format(struct scx_sched *sch, u64 *data_buf, char *line_buf, 6630 size_t line_size, char *fmt, unsigned long long *data, 6631 u32 data__sz) 6632 { 6633 struct bpf_bprintf_data bprintf_data = { .get_bin_args = true }; 6634 s32 ret; 6635 6636 if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 || 6637 (data__sz && !data)) { 6638 scx_error(sch, "invalid data=%p and data__sz=%u", (void *)data, data__sz); 6639 return -EINVAL; 6640 } 6641 6642 ret = copy_from_kernel_nofault(data_buf, data, data__sz); 6643 if (ret < 0) { 6644 scx_error(sch, "failed to read data fields (%d)", ret); 6645 return ret; 6646 } 6647 6648 ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8, 6649 &bprintf_data); 6650 if (ret < 0) { 6651 scx_error(sch, "format preparation failed (%d)", ret); 6652 return ret; 6653 } 6654 6655 ret = bstr_printf(line_buf, line_size, fmt, 6656 bprintf_data.bin_args); 6657 bpf_bprintf_cleanup(&bprintf_data); 6658 if (ret < 0) { 6659 scx_error(sch, "(\"%s\", %p, %u) failed to format", fmt, data, data__sz); 6660 return ret; 6661 } 6662 6663 return ret; 6664 } 6665 6666 static s32 bstr_format(struct scx_sched *sch, struct scx_bstr_buf *buf, 6667 char *fmt, unsigned long long *data, u32 data__sz) 6668 { 6669 return __bstr_format(sch, buf->data, buf->line, sizeof(buf->line), 6670 fmt, data, data__sz); 6671 } 6672 6673 __bpf_kfunc_start_defs(); 6674 6675 /** 6676 * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler. 6677 * @exit_code: Exit value to pass to user space via struct scx_exit_info. 6678 * @fmt: error message format string 6679 * @data: format string parameters packaged using ___bpf_fill() macro 6680 * @data__sz: @data len, must end in '__sz' for the verifier 6681 * 6682 * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops 6683 * disabling. 6684 */ 6685 __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt, 6686 unsigned long long *data, u32 data__sz) 6687 { 6688 struct scx_sched *sch; 6689 unsigned long flags; 6690 6691 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags); 6692 sch = rcu_dereference_bh(scx_root); 6693 if (likely(sch) && 6694 bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0) 6695 scx_exit(sch, SCX_EXIT_UNREG_BPF, exit_code, "%s", scx_exit_bstr_buf.line); 6696 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags); 6697 } 6698 6699 /** 6700 * scx_bpf_error_bstr - Indicate fatal error 6701 * @fmt: error message format string 6702 * @data: format string parameters packaged using ___bpf_fill() macro 6703 * @data__sz: @data len, must end in '__sz' for the verifier 6704 * 6705 * Indicate that the BPF scheduler encountered a fatal error and initiate ops 6706 * disabling. 
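 *
 * BPF schedulers normally reach this kfunc through the scx_bpf_error()
 * wrapper macro in the scx tooling headers, which packages the format
 * arguments into @data/@data__sz. An illustrative sketch with an assumed
 * local condition:
 *
 *	if (unexpected_state)
 *		scx_bpf_error("unexpected state for pid %d", p->pid);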
6707 */ 6708 __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data, 6709 u32 data__sz) 6710 { 6711 struct scx_sched *sch; 6712 unsigned long flags; 6713 6714 raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags); 6715 sch = rcu_dereference_bh(scx_root); 6716 if (likely(sch) && 6717 bstr_format(sch, &scx_exit_bstr_buf, fmt, data, data__sz) >= 0) 6718 scx_exit(sch, SCX_EXIT_ERROR_BPF, 0, "%s", scx_exit_bstr_buf.line); 6719 raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags); 6720 } 6721 6722 /** 6723 * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler 6724 * @fmt: format string 6725 * @data: format string parameters packaged using ___bpf_fill() macro 6726 * @data__sz: @data len, must end in '__sz' for the verifier 6727 * 6728 * To be called through scx_bpf_dump() helper from ops.dump(), dump_cpu() and 6729 * dump_task() to generate extra debug dump specific to the BPF scheduler. 6730 * 6731 * The extra dump may be multiple lines. A single line may be split over 6732 * multiple calls. The last line is automatically terminated. 6733 */ 6734 __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data, 6735 u32 data__sz) 6736 { 6737 struct scx_sched *sch; 6738 struct scx_dump_data *dd = &scx_dump_data; 6739 struct scx_bstr_buf *buf = &dd->buf; 6740 s32 ret; 6741 6742 guard(rcu)(); 6743 6744 sch = rcu_dereference(scx_root); 6745 if (unlikely(!sch)) 6746 return; 6747 6748 if (raw_smp_processor_id() != dd->cpu) { 6749 scx_error(sch, "scx_bpf_dump() must only be called from ops.dump() and friends"); 6750 return; 6751 } 6752 6753 /* append the formatted string to the line buf */ 6754 ret = __bstr_format(sch, buf->data, buf->line + dd->cursor, 6755 sizeof(buf->line) - dd->cursor, fmt, data, data__sz); 6756 if (ret < 0) { 6757 dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)", 6758 dd->prefix, fmt, data, data__sz, ret); 6759 return; 6760 } 6761 6762 dd->cursor += ret; 6763 dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line)); 6764 6765 if (!dd->cursor) 6766 return; 6767 6768 /* 6769 * If the line buf overflowed or ends in a newline, flush it into the 6770 * dump. This is to allow the caller to generate a single line over 6771 * multiple calls. As ops_dump_flush() can also handle multiple lines in 6772 * the line buf, the only case which can lead to an unexpected 6773 * truncation is when the caller keeps generating newlines in the middle 6774 * instead of the end consecutively. Don't do that. 6775 */ 6776 if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n') 6777 ops_dump_flush(); 6778 } 6779 6780 /** 6781 * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ 6782 * 6783 * Iterate over all of the tasks currently enqueued on the local DSQ of the 6784 * caller's CPU, and re-enqueue them in the BPF scheduler. Can be called from 6785 * anywhere. 6786 */ 6787 __bpf_kfunc void scx_bpf_reenqueue_local___v2(void) 6788 { 6789 struct rq *rq; 6790 6791 guard(preempt)(); 6792 6793 rq = this_rq(); 6794 local_set(&rq->scx.reenq_local_deferred, 1); 6795 schedule_deferred(rq); 6796 } 6797 6798 /** 6799 * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU 6800 * @cpu: CPU of interest 6801 * 6802 * Return the maximum relative capacity of @cpu in relation to the most 6803 * performant CPU in the system. The return value is in the range [1, 6804 * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur(). 
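 *
 * An illustrative sketch (not part of this file) combining this kfunc with
 * scx_bpf_cpuperf_cur() to estimate @cpu's current performance relative to
 * the most performant CPU in the system, per the formula documented at
 * scx_bpf_cpuperf_cur():
 *
 *	u64 cur_sys_perf = (u64)scx_bpf_cpuperf_cap(cpu) *
 *			   scx_bpf_cpuperf_cur(cpu) / SCX_CPUPERF_ONE;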
6805 */ 6806 __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu) 6807 { 6808 struct scx_sched *sch; 6809 6810 guard(rcu)(); 6811 6812 sch = rcu_dereference(scx_root); 6813 if (likely(sch) && ops_cpu_valid(sch, cpu, NULL)) 6814 return arch_scale_cpu_capacity(cpu); 6815 else 6816 return SCX_CPUPERF_ONE; 6817 } 6818 6819 /** 6820 * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU 6821 * @cpu: CPU of interest 6822 * 6823 * Return the current relative performance of @cpu in relation to its maximum. 6824 * The return value is in the range [1, %SCX_CPUPERF_ONE]. 6825 * 6826 * The current performance level of a CPU in relation to the maximum performance 6827 * available in the system can be calculated as follows: 6828 * 6829 * scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE 6830 * 6831 * The result is in the range [1, %SCX_CPUPERF_ONE]. 6832 */ 6833 __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu) 6834 { 6835 struct scx_sched *sch; 6836 6837 guard(rcu)(); 6838 6839 sch = rcu_dereference(scx_root); 6840 if (likely(sch) && ops_cpu_valid(sch, cpu, NULL)) 6841 return arch_scale_freq_capacity(cpu); 6842 else 6843 return SCX_CPUPERF_ONE; 6844 } 6845 6846 /** 6847 * scx_bpf_cpuperf_set - Set the relative performance target of a CPU 6848 * @cpu: CPU of interest 6849 * @perf: target performance level [0, %SCX_CPUPERF_ONE] 6850 * 6851 * Set the target performance level of @cpu to @perf. @perf is in linear 6852 * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the 6853 * schedutil cpufreq governor chooses the target frequency. 6854 * 6855 * The actual performance level chosen, CPU grouping, and the overhead and 6856 * latency of the operations are dependent on the hardware and cpufreq driver in 6857 * use. Consult hardware and cpufreq documentation for more information. The 6858 * current performance level can be monitored using scx_bpf_cpuperf_cur(). 6859 */ 6860 __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf) 6861 { 6862 struct scx_sched *sch; 6863 6864 guard(rcu)(); 6865 6866 sch = rcu_dereference(scx_root); 6867 if (unlikely(!sch)) 6868 return; 6869 6870 if (unlikely(perf > SCX_CPUPERF_ONE)) { 6871 scx_error(sch, "Invalid cpuperf target %u for CPU %d", perf, cpu); 6872 return; 6873 } 6874 6875 if (ops_cpu_valid(sch, cpu, NULL)) { 6876 struct rq *rq = cpu_rq(cpu), *locked_rq = scx_locked_rq(); 6877 struct rq_flags rf; 6878 6879 /* 6880 * When called with an rq lock held, restrict the operation 6881 * to the corresponding CPU to prevent ABBA deadlocks. 6882 */ 6883 if (locked_rq && rq != locked_rq) { 6884 scx_error(sch, "Invalid target CPU %d", cpu); 6885 return; 6886 } 6887 6888 /* 6889 * If no rq lock is held, allow to operate on any CPU by 6890 * acquiring the corresponding rq lock. 6891 */ 6892 if (!locked_rq) { 6893 rq_lock_irqsave(rq, &rf); 6894 update_rq_clock(rq); 6895 } 6896 6897 rq->scx.cpuperf_target = perf; 6898 cpufreq_update_util(rq, 0); 6899 6900 if (!locked_rq) 6901 rq_unlock_irqrestore(rq, &rf); 6902 } 6903 } 6904 6905 /** 6906 * scx_bpf_nr_node_ids - Return the number of possible node IDs 6907 * 6908 * All valid node IDs in the system are smaller than the returned value. 6909 */ 6910 __bpf_kfunc u32 scx_bpf_nr_node_ids(void) 6911 { 6912 return nr_node_ids; 6913 } 6914 6915 /** 6916 * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs 6917 * 6918 * All valid CPU IDs in the system are smaller than the returned value. 
6919 */ 6920 __bpf_kfunc u32 scx_bpf_nr_cpu_ids(void) 6921 { 6922 return nr_cpu_ids; 6923 } 6924 6925 /** 6926 * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask 6927 */ 6928 __bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void) 6929 { 6930 return cpu_possible_mask; 6931 } 6932 6933 /** 6934 * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask 6935 */ 6936 __bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void) 6937 { 6938 return cpu_online_mask; 6939 } 6940 6941 /** 6942 * scx_bpf_put_cpumask - Release a possible/online cpumask 6943 * @cpumask: cpumask to release 6944 */ 6945 __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask) 6946 { 6947 /* 6948 * Empty function body because we aren't actually acquiring or releasing 6949 * a reference to a global cpumask, which is read-only in the caller and 6950 * is never released. The acquire / release semantics here are just used 6951 * to make the cpumask is a trusted pointer in the caller. 6952 */ 6953 } 6954 6955 /** 6956 * scx_bpf_task_running - Is task currently running? 6957 * @p: task of interest 6958 */ 6959 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p) 6960 { 6961 return task_rq(p)->curr == p; 6962 } 6963 6964 /** 6965 * scx_bpf_task_cpu - CPU a task is currently associated with 6966 * @p: task of interest 6967 */ 6968 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p) 6969 { 6970 return task_cpu(p); 6971 } 6972 6973 /** 6974 * scx_bpf_cpu_rq - Fetch the rq of a CPU 6975 * @cpu: CPU of the rq 6976 */ 6977 __bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu) 6978 { 6979 struct scx_sched *sch; 6980 6981 guard(rcu)(); 6982 6983 sch = rcu_dereference(scx_root); 6984 if (unlikely(!sch)) 6985 return NULL; 6986 6987 if (!ops_cpu_valid(sch, cpu, NULL)) 6988 return NULL; 6989 6990 if (!sch->warned_deprecated_rq) { 6991 printk_deferred(KERN_WARNING "sched_ext: %s() is deprecated; " 6992 "use scx_bpf_locked_rq() when holding rq lock " 6993 "or scx_bpf_cpu_curr() to read remote curr safely.\n", __func__); 6994 sch->warned_deprecated_rq = true; 6995 } 6996 6997 return cpu_rq(cpu); 6998 } 6999 7000 /** 7001 * scx_bpf_locked_rq - Return the rq currently locked by SCX 7002 * 7003 * Returns the rq if a rq lock is currently held by SCX. 7004 * Otherwise emits an error and returns NULL. 7005 */ 7006 __bpf_kfunc struct rq *scx_bpf_locked_rq(void) 7007 { 7008 struct scx_sched *sch; 7009 struct rq *rq; 7010 7011 guard(preempt)(); 7012 7013 sch = rcu_dereference_sched(scx_root); 7014 if (unlikely(!sch)) 7015 return NULL; 7016 7017 rq = scx_locked_rq(); 7018 if (!rq) { 7019 scx_error(sch, "accessing rq without holding rq lock"); 7020 return NULL; 7021 } 7022 7023 return rq; 7024 } 7025 7026 /** 7027 * scx_bpf_cpu_curr - Return remote CPU's curr task 7028 * @cpu: CPU of interest 7029 * 7030 * Callers must hold RCU read lock (KF_RCU). 7031 */ 7032 __bpf_kfunc struct task_struct *scx_bpf_cpu_curr(s32 cpu) 7033 { 7034 struct scx_sched *sch; 7035 7036 guard(rcu)(); 7037 7038 sch = rcu_dereference(scx_root); 7039 if (unlikely(!sch)) 7040 return NULL; 7041 7042 if (!ops_cpu_valid(sch, cpu, NULL)) 7043 return NULL; 7044 7045 return rcu_dereference(cpu_rq(cpu)->curr); 7046 } 7047 7048 /** 7049 * scx_bpf_task_cgroup - Return the sched cgroup of a task 7050 * @p: task of interest 7051 * 7052 * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with 7053 * from the scheduler's POV. 
SCX operations should use this function to 7054 * determine @p's current cgroup as, unlike following @p->cgroups, 7055 * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all 7056 * rq-locked operations. Can be called on the parameter tasks of rq-locked 7057 * operations. The restriction guarantees that @p's rq is locked by the caller. 7058 */ 7059 #ifdef CONFIG_CGROUP_SCHED 7060 __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) 7061 { 7062 struct task_group *tg = p->sched_task_group; 7063 struct cgroup *cgrp = &cgrp_dfl_root.cgrp; 7064 struct scx_sched *sch; 7065 7066 guard(rcu)(); 7067 7068 sch = rcu_dereference(scx_root); 7069 if (unlikely(!sch)) 7070 goto out; 7071 7072 if (!scx_kf_allowed_on_arg_tasks(sch, __SCX_KF_RQ_LOCKED, p)) 7073 goto out; 7074 7075 cgrp = tg_cgrp(tg); 7076 7077 out: 7078 cgroup_get(cgrp); 7079 return cgrp; 7080 } 7081 #endif 7082 7083 /** 7084 * scx_bpf_now - Returns a high-performance monotonically non-decreasing 7085 * clock for the current CPU. The clock returned is in nanoseconds. 7086 * 7087 * It provides the following properties: 7088 * 7089 * 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently 7090 * to account for execution time and track tasks' runtime properties. 7091 * Unfortunately, in some hardware platforms, bpf_ktime_get_ns() -- which 7092 * eventually reads a hardware timestamp counter -- is neither performant nor 7093 * scalable. scx_bpf_now() aims to provide a high-performance clock by 7094 * using the rq clock in the scheduler core whenever possible. 7095 * 7096 * 2) High enough resolution for the BPF scheduler use cases: In most BPF 7097 * scheduler use cases, the required clock resolution is lower than the most 7098 * accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically 7099 * uses the rq clock in the scheduler core whenever it is valid. It considers 7100 * that the rq clock is valid from the time the rq clock is updated 7101 * (update_rq_clock) until the rq is unlocked (rq_unpin_lock). 7102 * 7103 * 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now() 7104 * guarantees the clock never goes backward when comparing them in the same 7105 * CPU. On the other hand, when comparing clocks in different CPUs, there 7106 * is no such guarantee -- the clock can go backward. It provides a 7107 * monotonically *non-decreasing* clock so that it would provide the same 7108 * clock values in two different scx_bpf_now() calls in the same CPU 7109 * during the same period of when the rq clock is valid. 7110 */ 7111 __bpf_kfunc u64 scx_bpf_now(void) 7112 { 7113 struct rq *rq; 7114 u64 clock; 7115 7116 preempt_disable(); 7117 7118 rq = this_rq(); 7119 if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) { 7120 /* 7121 * If the rq clock is valid, use the cached rq clock. 7122 * 7123 * Note that scx_bpf_now() is re-entrant between a process 7124 * context and an interrupt context (e.g., timer interrupt). 7125 * However, we don't need to consider the race between them 7126 * because such race is not observable from a caller. 7127 */ 7128 clock = READ_ONCE(rq->scx.clock); 7129 } else { 7130 /* 7131 * Otherwise, return a fresh rq clock. 7132 * 7133 * The rq clock is updated outside of the rq lock. 7134 * In this case, keep the updated rq clock invalid so the next 7135 * kfunc call outside the rq lock gets a fresh rq clock. 
7136 	 */
7137 		clock = sched_clock_cpu(cpu_of(rq));
7138 	}
7139 
7140 	preempt_enable();
7141 
7142 	return clock;
7143 }
7144 
7145 static void scx_read_events(struct scx_sched *sch, struct scx_event_stats *events)
7146 {
7147 	struct scx_event_stats *e_cpu;
7148 	int cpu;
7149 
7150 	/* Aggregate per-CPU event counters into @events. */
7151 	memset(events, 0, sizeof(*events));
7152 	for_each_possible_cpu(cpu) {
7153 		e_cpu = &per_cpu_ptr(sch->pcpu, cpu)->event_stats;
7154 		scx_agg_event(events, e_cpu, SCX_EV_SELECT_CPU_FALLBACK);
7155 		scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
7156 		scx_agg_event(events, e_cpu, SCX_EV_DISPATCH_KEEP_LAST);
7157 		scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_EXITING);
7158 		scx_agg_event(events, e_cpu, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
7159 		scx_agg_event(events, e_cpu, SCX_EV_REFILL_SLICE_DFL);
7160 		scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DURATION);
7161 		scx_agg_event(events, e_cpu, SCX_EV_BYPASS_DISPATCH);
7162 		scx_agg_event(events, e_cpu, SCX_EV_BYPASS_ACTIVATE);
7163 	}
7164 }
7165 
7166 /**
7167  * scx_bpf_events - Copy the aggregated system-wide event counters to @events
7168  * @events: output buffer from a BPF program
7169  * @events__sz: @events len, must end in '__sz' for the verifier
7170  */
7171 __bpf_kfunc void scx_bpf_events(struct scx_event_stats *events,
7172 				size_t events__sz)
7173 {
7174 	struct scx_sched *sch;
7175 	struct scx_event_stats e_sys;
7176 
7177 	rcu_read_lock();
7178 	sch = rcu_dereference(scx_root);
7179 	if (sch)
7180 		scx_read_events(sch, &e_sys);
7181 	else
7182 		memset(&e_sys, 0, sizeof(e_sys));
7183 	rcu_read_unlock();
7184 
7185 	/*
7186 	 * We cannot entirely trust a BPF-provided size since a BPF program
7187 	 * might be compiled against a different vmlinux.h, in which
7188 	 * scx_event_stats may be larger (a newer vmlinux.h) or smaller
7189 	 * (an older vmlinux.h). Hence, we use the smaller size to avoid
7190 	 * memory corruption.
7191 */ 7192 events__sz = min(events__sz, sizeof(*events)); 7193 memcpy(events, &e_sys, events__sz); 7194 } 7195 7196 __bpf_kfunc_end_defs(); 7197 7198 BTF_KFUNCS_START(scx_kfunc_ids_any) 7199 BTF_ID_FLAGS(func, scx_bpf_task_set_slice, KF_RCU); 7200 BTF_ID_FLAGS(func, scx_bpf_task_set_dsq_vtime, KF_RCU); 7201 BTF_ID_FLAGS(func, scx_bpf_kick_cpu) 7202 BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued) 7203 BTF_ID_FLAGS(func, scx_bpf_destroy_dsq) 7204 BTF_ID_FLAGS(func, scx_bpf_dsq_peek, KF_RCU_PROTECTED | KF_RET_NULL) 7205 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED) 7206 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL) 7207 BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY) 7208 BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS) 7209 BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS) 7210 BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS) 7211 BTF_ID_FLAGS(func, scx_bpf_reenqueue_local___v2) 7212 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap) 7213 BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur) 7214 BTF_ID_FLAGS(func, scx_bpf_cpuperf_set) 7215 BTF_ID_FLAGS(func, scx_bpf_nr_node_ids) 7216 BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids) 7217 BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE) 7218 BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE) 7219 BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE) 7220 BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU) 7221 BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU) 7222 BTF_ID_FLAGS(func, scx_bpf_cpu_rq) 7223 BTF_ID_FLAGS(func, scx_bpf_locked_rq, KF_RET_NULL) 7224 BTF_ID_FLAGS(func, scx_bpf_cpu_curr, KF_RET_NULL | KF_RCU_PROTECTED) 7225 #ifdef CONFIG_CGROUP_SCHED 7226 BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE) 7227 #endif 7228 BTF_ID_FLAGS(func, scx_bpf_now) 7229 BTF_ID_FLAGS(func, scx_bpf_events, KF_TRUSTED_ARGS) 7230 BTF_KFUNCS_END(scx_kfunc_ids_any) 7231 7232 static const struct btf_kfunc_id_set scx_kfunc_set_any = { 7233 .owner = THIS_MODULE, 7234 .set = &scx_kfunc_ids_any, 7235 }; 7236 7237 static int __init scx_init(void) 7238 { 7239 int ret; 7240 7241 /* 7242 * kfunc registration can't be done from init_sched_ext_class() as 7243 * register_btf_kfunc_id_set() needs most of the system to be up. 7244 * 7245 * Some kfuncs are context-sensitive and can only be called from 7246 * specific SCX ops. They are grouped into BTF sets accordingly. 7247 * Unfortunately, BPF currently doesn't have a way of enforcing such 7248 * restrictions. Eventually, the verifier should be able to enforce 7249 * them. For now, register them the same and make each kfunc explicitly 7250 * check using scx_kf_allowed(). 
7251 */ 7252 if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 7253 &scx_kfunc_set_enqueue_dispatch)) || 7254 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 7255 &scx_kfunc_set_dispatch)) || 7256 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 7257 &scx_kfunc_set_cpu_release)) || 7258 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 7259 &scx_kfunc_set_unlocked)) || 7260 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, 7261 &scx_kfunc_set_unlocked)) || 7262 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, 7263 &scx_kfunc_set_any)) || 7264 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, 7265 &scx_kfunc_set_any)) || 7266 (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, 7267 &scx_kfunc_set_any))) { 7268 pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret); 7269 return ret; 7270 } 7271 7272 ret = scx_idle_init(); 7273 if (ret) { 7274 pr_err("sched_ext: Failed to initialize idle tracking (%d)\n", ret); 7275 return ret; 7276 } 7277 7278 ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops); 7279 if (ret) { 7280 pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret); 7281 return ret; 7282 } 7283 7284 ret = register_pm_notifier(&scx_pm_notifier); 7285 if (ret) { 7286 pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret); 7287 return ret; 7288 } 7289 7290 scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj); 7291 if (!scx_kset) { 7292 pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n"); 7293 return -ENOMEM; 7294 } 7295 7296 ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group); 7297 if (ret < 0) { 7298 pr_err("sched_ext: Failed to add global attributes\n"); 7299 return ret; 7300 } 7301 7302 if (!alloc_cpumask_var(&scx_bypass_lb_donee_cpumask, GFP_KERNEL) || 7303 !alloc_cpumask_var(&scx_bypass_lb_resched_cpumask, GFP_KERNEL)) { 7304 pr_err("sched_ext: Failed to allocate cpumasks\n"); 7305 return -ENOMEM; 7306 } 7307 7308 return 0; 7309 } 7310 __initcall(scx_init); 7311