// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/syscalls.c
 *
 *  Core kernel scheduler syscalls related code
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
 */
#include <linux/sched.h>
#include <linux/cpuset.h>
#include <linux/sched/debug.h>

#include <uapi/linux/sched/types.h>

#include "sched.h"
#include "autogroup.h"

static inline int __normal_prio(int policy, int rt_prio, int nice)
{
	int prio;

	if (dl_policy(policy))
		prio = MAX_DL_PRIO - 1;
	else if (rt_policy(policy))
		prio = MAX_RT_PRIO - 1 - rt_prio;
	else
		prio = NICE_TO_PRIO(nice);

	return prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

void set_user_nice(struct task_struct *p, long nice)
{
	bool queued, running;
	struct rq *rq;
	int old_prio;

	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
		return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
	CLASS(task_rq_lock, rq_guard)(p);
	rq = rq_guard.rq;

	update_rq_clock(rq);

	/*
	 * The RT priorities are set via sched_setscheduler(), but we still
	 * allow the 'normal' nice value to be set - but as expected
	 * it won't have any effect on scheduling until the task is no
	 * longer SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
	 */
	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
		p->static_prio = NICE_TO_PRIO(nice);
		return;
	}

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);
	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	if (running)
		put_prev_task(rq, p);

	p->static_prio = NICE_TO_PRIO(nice);
	set_load_weight(p, true);
	old_prio = p->prio;
	p->prio = effective_prio(p);

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);

	/*
	 * If the task increased its priority or is running and
	 * lowered its priority, then reschedule its CPU:
	 */
	p->sched_class->prio_changed(rq, p, old_prio);
}
EXPORT_SYMBOL(set_user_nice);

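/*
 * Illustrative nice-to-priority mapping (documentation aid; the concrete
 * values assume the usual MAX_RT_PRIO == 100 and match the [100 ... 139]
 * kernel prio range shown in the task_prio() table further down):
 *
 *	nice -20  ->  NICE_TO_PRIO(-20) == 100	(highest fair priority)
 *	nice   0  ->  NICE_TO_PRIO(0)   == 120	(default)
 *	nice  19  ->  NICE_TO_PRIO(19)  == 139	(lowest fair priority)
 */
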
/*
 * is_nice_reduction - check if nice value is an actual reduction
 *
 * Similar to can_nice() but does not perform a capability check.
 *
 * @p: task
 * @nice: nice value
 */
static bool is_nice_reduction(const struct task_struct *p, const int nice)
{
	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
	int nice_rlim = nice_to_rlimit(nice);

	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
}

/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
int can_nice(const struct task_struct *p, const int nice)
{
	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
}

#ifdef __ARCH_WANT_SYS_NICE

/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
	long nice, retval;

	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
	nice = task_nice(current) + increment;

	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
	if (increment < 0 && !can_nice(current, nice))
		return -EPERM;

	retval = security_task_setnice(current, nice);
	if (retval)
		return retval;

	set_user_nice(current, nice);
	return 0;
}

#endif

/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * Return: The priority value as seen by users in /proc.
 *
 * sched policy         return value   kernel prio    user prio/nice
 *
 * normal, batch, idle     [0 ... 39]  [100 ... 139]          0/[-20 ... 19]
 * fifo, rr             [-2 ... -100]     [98 ... 0]  [1 ... 99]
 * deadline                     -101             -1           0
 */
int task_prio(const struct task_struct *p)
{
	return p->prio - MAX_RT_PRIO;
}

/**
 * idle_cpu - is a given CPU idle currently?
 * @cpu: the processor in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (rq->curr != rq->idle)
		return 0;

	if (rq->nr_running)
		return 0;

#ifdef CONFIG_SMP
	if (rq->ttwu_pending)
		return 0;
#endif

	return 1;
}

/**
 * available_idle_cpu - is a given CPU idle for enqueuing work.
 * @cpu: the CPU in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int available_idle_cpu(int cpu)
{
	if (!idle_cpu(cpu))
		return 0;

	if (vcpu_is_preempted(cpu))
		return 0;

	return 1;
}

/**
 * idle_task - return the idle task for a given CPU.
 * @cpu: the processor in question.
 *
 * Return: The idle task for the CPU @cpu.
 */
struct task_struct *idle_task(int cpu)
{
	return cpu_rq(cpu)->idle;
}

#ifdef CONFIG_SCHED_CORE
int sched_core_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (sched_core_enabled(rq) && rq->curr == rq->idle)
		return 1;

	return idle_cpu(cpu);
}

#endif

#ifdef CONFIG_SMP
/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *   cpu_util_{cfs,rt,dl,irq}()
 *   cpu_bw_dl()
 *
 * Where the cfs,rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
 * which excludes things like IRQ and steal-time. These latter are then accrued
 * in the IRQ utilization.
 *
 * The DL bandwidth number OTOH is not a measured metric but a value computed
 * based on the task model parameters and gives the minimal utilization
 * required to meet deadlines.
 */
unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
				 unsigned long *min,
				 unsigned long *max)
{
	unsigned long util, irq, scale;
	struct rq *rq = cpu_rq(cpu);

	scale = arch_scale_cpu_capacity(cpu);

	/*
	 * Early check to see if IRQ/steal time saturates the CPU; this can
	 * happen because of inaccuracies in how we track these -- see
	 * update_irq_load_avg().
	 */
	irq = cpu_util_irq(rq);
	if (unlikely(irq >= scale)) {
		if (min)
			*min = scale;
		if (max)
			*max = scale;
		return scale;
	}

	if (min) {
		/*
		 * The minimum utilization returns the highest level between:
		 * - the computed DL bandwidth needed with the IRQ pressure which
		 *   steals time from the deadline task.
		 * - The minimum performance requirement for CFS and/or RT.
		 */
		*min = max(irq + cpu_bw_dl(rq), uclamp_rq_get(rq, UCLAMP_MIN));

		/*
		 * When an RT task is runnable and uclamp is not used, we must
		 * ensure that the task will run at maximum compute capacity.
		 */
		if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt))
			*min = max(*min, scale);
	}

	/*
	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
	 * CFS tasks and we use the same metric to track the effective
	 * utilization (PELT windows are synchronized) we can directly add them
	 * to obtain the CPU's actual utilization.
	 */
	util = util_cfs + cpu_util_rt(rq);
	util += cpu_util_dl(rq);

	/*
	 * The maximum hint is a soft bandwidth requirement, which can be lower
	 * than the actual utilization because of uclamp_max requirements.
	 */
	if (max)
		*max = min(scale, uclamp_rq_get(rq, UCLAMP_MAX));

	if (util >= scale)
		return scale;

	/*
	 * There is still idle time; further improve the number by using the
	 * IRQ metric. Because IRQ/steal time is hidden from the task clock we
	 * need to scale the task numbers:
	 *
	 *              max - irq
	 *   U' = irq + --------- * U
	 *                 max
	 */
	util = scale_irq_capacity(util, irq, scale);
	util += irq;

	return min(scale, util);
}

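/*
 * Worked example of the scaling above (illustrative numbers, not taken
 * from this file): with scale (max) = 1024, irq = 256 and a summed
 * CFS+RT+DL utilization U = 512:
 *
 *	U' = 256 + (1024 - 256) / 1024 * 512 = 256 + 384 = 640
 *
 * i.e. roughly 62% of the CPU's capacity is requested once the 25% of
 * IRQ/steal time is factored back in.
 */
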
unsigned long sched_cpu_util(int cpu)
{
	return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL);
}
#endif /* CONFIG_SMP */

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 *
 * Return: the task of @pid, if found. %NULL otherwise.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}

static struct task_struct *find_get_task(pid_t pid)
{
	struct task_struct *p;
	guard(rcu)();

	p = find_process_by_pid(pid);
	if (likely(p))
		get_task_struct(p);

	return p;
}

DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
	     find_get_task(pid), pid_t pid)

/*
 * sched_setparam() passes in -1 for its policy, to let the functions
 * it calls know not to change it.
 */
#define SETPARAM_POLICY	-1

static void __setscheduler_params(struct task_struct *p,
				  const struct sched_attr *attr)
{
	int policy = attr->sched_policy;

	if (policy == SETPARAM_POLICY)
		policy = p->policy;

	p->policy = policy;

	if (dl_policy(policy))
		__setparam_dl(p, attr);
	else if (fair_policy(policy))
		p->static_prio = NICE_TO_PRIO(attr->sched_nice);

	/* rt-policy tasks do not have a timerslack */
	if (task_is_realtime(p)) {
		p->timer_slack_ns = 0;
	} else if (p->timer_slack_ns == 0) {
		/* when switching back to non-rt policy, restore timerslack */
		p->timer_slack_ns = p->default_timer_slack_ns;
	}

	/*
	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
	 * !rt_policy. Always setting this ensures that things like
	 * getparam()/getattr() don't report silly values for !rt tasks.
	 */
	p->rt_priority = attr->sched_priority;
	p->normal_prio = normal_prio(p);
	set_load_weight(p, true);
}

/*
 * Check the target process has a UID that matches the current process's:
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	guard(rcu)();

	pcred = __task_cred(p);
	return (uid_eq(cred->euid, pcred->euid) ||
		uid_eq(cred->euid, pcred->uid));
}

#ifdef CONFIG_UCLAMP_TASK

static int uclamp_validate(struct task_struct *p,
			   const struct sched_attr *attr)
{
	int util_min = p->uclamp_req[UCLAMP_MIN].value;
	int util_max = p->uclamp_req[UCLAMP_MAX].value;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
		util_min = attr->sched_util_min;

		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
		util_max = attr->sched_util_max;

		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (util_min != -1 && util_max != -1 && util_min > util_max)
		return -EINVAL;

	/*
	 * We have valid uclamp attributes; make sure uclamp is enabled.
	 *
	 * We need to do that here, because enabling static branches is a
	 * blocking operation which obviously cannot be done while holding
	 * scheduler locks.
	 */
	static_branch_enable(&sched_uclamp_used);

	return 0;
}

static bool uclamp_reset(const struct sched_attr *attr,
			 enum uclamp_id clamp_id,
			 struct uclamp_se *uc_se)
{
	/* Reset on sched class change for a non user-defined clamp value. */
	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
	    !uc_se->user_defined)
		return true;

	/* Reset on sched_util_{min,max} == -1. */
	if (clamp_id == UCLAMP_MIN &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min == -1) {
		return true;
	}

	if (clamp_id == UCLAMP_MAX &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max == -1) {
		return true;
	}

	return false;
}

static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr)
{
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id) {
		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
		unsigned int value;

		if (!uclamp_reset(attr, clamp_id, uc_se))
			continue;

		/*
		 * RT tasks by default have a 100% boost value that can be
		 * modified at runtime.
		 */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			value = sysctl_sched_uclamp_util_min_rt_default;
		else
			value = uclamp_none(clamp_id);

		uclamp_se_set(uc_se, value, false);
	}

	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
		return;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
			      attr->sched_util_min, true);
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
			      attr->sched_util_max, true);
	}
}

#else /* !CONFIG_UCLAMP_TASK: */

static inline int uclamp_validate(struct task_struct *p,
				  const struct sched_attr *attr)
{
	return -EOPNOTSUPP;
}
static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr) { }
#endif

/*
 * Allow unprivileged RT tasks to decrease priority.
 * Only issue a capable test if needed and only once to avoid an audit
 * event on permitted non-privileged operations:
 */
static int user_check_sched_setscheduler(struct task_struct *p,
					 const struct sched_attr *attr,
					 int policy, int reset_on_fork)
{
	if (fair_policy(policy)) {
		if (attr->sched_nice < task_nice(p) &&
		    !is_nice_reduction(p, attr->sched_nice))
			goto req_priv;
	}

	if (rt_policy(policy)) {
		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);

		/* Can't set/change the rt policy: */
		if (policy != p->policy && !rlim_rtprio)
			goto req_priv;

		/* Can't increase priority: */
		if (attr->sched_priority > p->rt_priority &&
		    attr->sched_priority > rlim_rtprio)
			goto req_priv;
	}

	/*
	 * Can't set/change SCHED_DEADLINE policy at all for now
	 * (safest behavior); in the future we would like to allow
	 * unprivileged DL tasks to increase their relative deadline
	 * or reduce their runtime (both ways reducing utilization)
	 */
	if (dl_policy(policy))
		goto req_priv;

	/*
	 * Treat SCHED_IDLE as nice 20. Only allow a switch to
	 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
594 */ 595 if (task_has_idle_policy(p) && !idle_policy(policy)) { 596 if (!is_nice_reduction(p, task_nice(p))) 597 goto req_priv; 598 } 599 600 /* Can't change other user's priorities: */ 601 if (!check_same_owner(p)) 602 goto req_priv; 603 604 /* Normal users shall not reset the sched_reset_on_fork flag: */ 605 if (p->sched_reset_on_fork && !reset_on_fork) 606 goto req_priv; 607 608 return 0; 609 610 req_priv: 611 if (!capable(CAP_SYS_NICE)) 612 return -EPERM; 613 614 return 0; 615 } 616 617 int __sched_setscheduler(struct task_struct *p, 618 const struct sched_attr *attr, 619 bool user, bool pi) 620 { 621 int oldpolicy = -1, policy = attr->sched_policy; 622 int retval, oldprio, newprio, queued, running; 623 const struct sched_class *prev_class; 624 struct balance_callback *head; 625 struct rq_flags rf; 626 int reset_on_fork; 627 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 628 struct rq *rq; 629 bool cpuset_locked = false; 630 631 /* The pi code expects interrupts enabled */ 632 BUG_ON(pi && in_interrupt()); 633 recheck: 634 /* Double check policy once rq lock held: */ 635 if (policy < 0) { 636 reset_on_fork = p->sched_reset_on_fork; 637 policy = oldpolicy = p->policy; 638 } else { 639 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); 640 641 if (!valid_policy(policy)) 642 return -EINVAL; 643 } 644 645 if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV)) 646 return -EINVAL; 647 648 /* 649 * Valid priorities for SCHED_FIFO and SCHED_RR are 650 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL, 651 * SCHED_BATCH and SCHED_IDLE is 0. 652 */ 653 if (attr->sched_priority > MAX_RT_PRIO-1) 654 return -EINVAL; 655 if ((dl_policy(policy) && !__checkparam_dl(attr)) || 656 (rt_policy(policy) != (attr->sched_priority != 0))) 657 return -EINVAL; 658 659 if (user) { 660 retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork); 661 if (retval) 662 return retval; 663 664 if (attr->sched_flags & SCHED_FLAG_SUGOV) 665 return -EINVAL; 666 667 retval = security_task_setscheduler(p); 668 if (retval) 669 return retval; 670 } 671 672 /* Update task specific "requested" clamps */ 673 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) { 674 retval = uclamp_validate(p, attr); 675 if (retval) 676 return retval; 677 } 678 679 /* 680 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets 681 * information. 682 */ 683 if (dl_policy(policy) || dl_policy(p->policy)) { 684 cpuset_locked = true; 685 cpuset_lock(); 686 } 687 688 /* 689 * Make sure no PI-waiters arrive (or leave) while we are 690 * changing the priority of the task: 691 * 692 * To be able to change p->policy safely, the appropriate 693 * runqueue lock must be held. 694 */ 695 rq = task_rq_lock(p, &rf); 696 update_rq_clock(rq); 697 698 /* 699 * Changing the policy of the stop threads its a very bad idea: 700 */ 701 if (p == rq->stop) { 702 retval = -EINVAL; 703 goto unlock; 704 } 705 706 /* 707 * If not changing anything there's no need to proceed further, 708 * but store a possible modification of reset_on_fork. 
	 */
	if (unlikely(policy == p->policy)) {
		if (fair_policy(policy) && attr->sched_nice != task_nice(p))
			goto change;
		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
			goto change;
		if (dl_policy(policy) && dl_param_changed(p, attr))
			goto change;
		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
			goto change;

		p->sched_reset_on_fork = reset_on_fork;
		retval = 0;
		goto unlock;
	}
change:

	if (user) {
#ifdef CONFIG_RT_GROUP_SCHED
		/*
		 * Do not allow real-time tasks into groups that have no runtime
		 * assigned.
		 */
		if (rt_bandwidth_enabled() && rt_policy(policy) &&
		    task_group(p)->rt_bandwidth.rt_runtime == 0 &&
		    !task_group_is_autogroup(task_group(p))) {
			retval = -EPERM;
			goto unlock;
		}
#endif
#ifdef CONFIG_SMP
		if (dl_bandwidth_enabled() && dl_policy(policy) &&
		    !(attr->sched_flags & SCHED_FLAG_SUGOV)) {
			cpumask_t *span = rq->rd->span;

			/*
			 * Don't allow tasks with an affinity mask smaller than
			 * the entire root_domain to become SCHED_DEADLINE. We
			 * will also fail if there's no bandwidth available.
			 */
			if (!cpumask_subset(span, p->cpus_ptr) ||
			    rq->rd->dl_bw.bw == 0) {
				retval = -EPERM;
				goto unlock;
			}
		}
#endif
	}

	/* Re-check policy now with rq lock held: */
	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
		policy = oldpolicy = -1;
		task_rq_unlock(rq, p, &rf);
		if (cpuset_locked)
			cpuset_unlock();
		goto recheck;
	}

	/*
	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
	 * is available.
	 */
	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
		retval = -EBUSY;
		goto unlock;
	}

	p->sched_reset_on_fork = reset_on_fork;
	oldprio = p->prio;

	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
	if (pi) {
		/*
		 * Take priority boosted tasks into account. If the new
		 * effective priority is unchanged, we just store the new
		 * normal parameters and do not touch the scheduler class and
		 * the runqueue. This will be done when the task deboosts
		 * itself.
		 */
		newprio = rt_effective_prio(p, newprio);
		if (newprio == oldprio)
			queue_flags &= ~DEQUEUE_MOVE;
	}

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);
	if (queued)
		dequeue_task(rq, p, queue_flags);
	if (running)
		put_prev_task(rq, p);

	prev_class = p->sched_class;

	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
		__setscheduler_params(p, attr);
		__setscheduler_prio(p, newprio);
	}
	__setscheduler_uclamp(p, attr);

	if (queued) {
		/*
		 * We enqueue to tail when the priority of a task is
		 * increased (user space view).
813 */ 814 if (oldprio < p->prio) 815 queue_flags |= ENQUEUE_HEAD; 816 817 enqueue_task(rq, p, queue_flags); 818 } 819 if (running) 820 set_next_task(rq, p); 821 822 check_class_changed(rq, p, prev_class, oldprio); 823 824 /* Avoid rq from going away on us: */ 825 preempt_disable(); 826 head = splice_balance_callbacks(rq); 827 task_rq_unlock(rq, p, &rf); 828 829 if (pi) { 830 if (cpuset_locked) 831 cpuset_unlock(); 832 rt_mutex_adjust_pi(p); 833 } 834 835 /* Run balance callbacks after we've adjusted the PI chain: */ 836 balance_callbacks(rq, head); 837 preempt_enable(); 838 839 return 0; 840 841 unlock: 842 task_rq_unlock(rq, p, &rf); 843 if (cpuset_locked) 844 cpuset_unlock(); 845 return retval; 846 } 847 848 static int _sched_setscheduler(struct task_struct *p, int policy, 849 const struct sched_param *param, bool check) 850 { 851 struct sched_attr attr = { 852 .sched_policy = policy, 853 .sched_priority = param->sched_priority, 854 .sched_nice = PRIO_TO_NICE(p->static_prio), 855 }; 856 857 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ 858 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { 859 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 860 policy &= ~SCHED_RESET_ON_FORK; 861 attr.sched_policy = policy; 862 } 863 864 return __sched_setscheduler(p, &attr, check, true); 865 } 866 /** 867 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 868 * @p: the task in question. 869 * @policy: new policy. 870 * @param: structure containing the new RT priority. 871 * 872 * Use sched_set_fifo(), read its comment. 873 * 874 * Return: 0 on success. An error code otherwise. 875 * 876 * NOTE that the task may be already dead. 877 */ 878 int sched_setscheduler(struct task_struct *p, int policy, 879 const struct sched_param *param) 880 { 881 return _sched_setscheduler(p, policy, param, true); 882 } 883 884 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) 885 { 886 return __sched_setscheduler(p, attr, true, true); 887 } 888 889 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) 890 { 891 return __sched_setscheduler(p, attr, false, true); 892 } 893 EXPORT_SYMBOL_GPL(sched_setattr_nocheck); 894 895 /** 896 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernel-space. 897 * @p: the task in question. 898 * @policy: new policy. 899 * @param: structure containing the new RT priority. 900 * 901 * Just like sched_setscheduler, only don't bother checking if the 902 * current context has permission. For example, this is needed in 903 * stop_machine(): we create temporary high priority worker threads, 904 * but our caller might not have that capability. 905 * 906 * Return: 0 on success. An error code otherwise. 907 */ 908 int sched_setscheduler_nocheck(struct task_struct *p, int policy, 909 const struct sched_param *param) 910 { 911 return _sched_setscheduler(p, policy, param, false); 912 } 913 914 /* 915 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally 916 * incapable of resource management, which is the one thing an OS really should 917 * be doing. 918 * 919 * This is of course the reason it is limited to privileged users only. 920 * 921 * Worse still; it is fundamentally impossible to compose static priority 922 * workloads. You cannot take two correctly working static prio workloads 923 * and smash them together and still expect them to work. 
924 * 925 * For this reason 'all' FIFO tasks the kernel creates are basically at: 926 * 927 * MAX_RT_PRIO / 2 928 * 929 * The administrator _MUST_ configure the system, the kernel simply doesn't 930 * know enough information to make a sensible choice. 931 */ 932 void sched_set_fifo(struct task_struct *p) 933 { 934 struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 }; 935 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); 936 } 937 EXPORT_SYMBOL_GPL(sched_set_fifo); 938 939 /* 940 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL. 941 */ 942 void sched_set_fifo_low(struct task_struct *p) 943 { 944 struct sched_param sp = { .sched_priority = 1 }; 945 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); 946 } 947 EXPORT_SYMBOL_GPL(sched_set_fifo_low); 948 949 void sched_set_normal(struct task_struct *p, int nice) 950 { 951 struct sched_attr attr = { 952 .sched_policy = SCHED_NORMAL, 953 .sched_nice = nice, 954 }; 955 WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0); 956 } 957 EXPORT_SYMBOL_GPL(sched_set_normal); 958 959 static int 960 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 961 { 962 struct sched_param lparam; 963 964 if (!param || pid < 0) 965 return -EINVAL; 966 if (copy_from_user(&lparam, param, sizeof(struct sched_param))) 967 return -EFAULT; 968 969 CLASS(find_get_task, p)(pid); 970 if (!p) 971 return -ESRCH; 972 973 return sched_setscheduler(p, policy, &lparam); 974 } 975 976 /* 977 * Mimics kernel/events/core.c perf_copy_attr(). 978 */ 979 static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr) 980 { 981 u32 size; 982 int ret; 983 984 /* Zero the full structure, so that a short copy will be nice: */ 985 memset(attr, 0, sizeof(*attr)); 986 987 ret = get_user(size, &uattr->size); 988 if (ret) 989 return ret; 990 991 /* ABI compatibility quirk: */ 992 if (!size) 993 size = SCHED_ATTR_SIZE_VER0; 994 if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE) 995 goto err_size; 996 997 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size); 998 if (ret) { 999 if (ret == -E2BIG) 1000 goto err_size; 1001 return ret; 1002 } 1003 1004 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) && 1005 size < SCHED_ATTR_SIZE_VER1) 1006 return -EINVAL; 1007 1008 /* 1009 * XXX: Do we want to be lenient like existing syscalls; or do we want 1010 * to be strict and return an error on out-of-bounds values? 1011 */ 1012 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); 1013 1014 return 0; 1015 1016 err_size: 1017 put_user(sizeof(*attr), &uattr->size); 1018 return -E2BIG; 1019 } 1020 1021 static void get_params(struct task_struct *p, struct sched_attr *attr) 1022 { 1023 if (task_has_dl_policy(p)) 1024 __getparam_dl(p, attr); 1025 else if (task_has_rt_policy(p)) 1026 attr->sched_priority = p->rt_priority; 1027 else 1028 attr->sched_nice = task_nice(p); 1029 } 1030 1031 /** 1032 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 1033 * @pid: the pid in question. 1034 * @policy: new policy. 1035 * @param: structure containing the new RT priority. 1036 * 1037 * Return: 0 on success. An error code otherwise. 1038 */ 1039 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) 1040 { 1041 if (policy < 0) 1042 return -EINVAL; 1043 1044 return do_sched_setscheduler(pid, policy, param); 1045 } 1046 1047 /** 1048 * sys_sched_setparam - set/change the RT priority of a thread 1049 * @pid: the pid in question. 
/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
}

/**
 * sys_sched_setattr - same as above, but with extended sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @flags: for future extension.
 */
SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
			       unsigned int, flags)
{
	struct sched_attr attr;
	int retval;

	if (!uattr || pid < 0 || flags)
		return -EINVAL;

	retval = sched_copy_attr(uattr, &attr);
	if (retval)
		return retval;

	if ((int)attr.sched_policy < 0)
		return -EINVAL;
	if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
		attr.sched_policy = SETPARAM_POLICY;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
		get_params(p, &attr);

	return sched_setattr(p, &attr);
}

/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 *
 * Return: On success, the policy of the thread. Otherwise, a negative error
 * code.
 */
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
	struct task_struct *p;
	int retval;

	if (pid < 0)
		return -EINVAL;

	guard(rcu)();
	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	retval = security_task_getscheduler(p);
	if (!retval) {
		retval = p->policy;
		if (p->sched_reset_on_fork)
			retval |= SCHED_RESET_ON_FORK;
	}
	return retval;
}

/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 *
 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
 * code.
 */
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
	struct sched_param lp = { .sched_priority = 0 };
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;

	scoped_guard (rcu) {
		p = find_process_by_pid(pid);
		if (!p)
			return -ESRCH;

		retval = security_task_getscheduler(p);
		if (retval)
			return retval;

		if (task_has_rt_policy(p))
			lp.sched_priority = p->rt_priority;
	}

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
}

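/*
 * Illustrative user-space use of sys_sched_setattr() above (an assumed
 * example for documentation only, not part of this file). There is
 * typically no libc wrapper for this syscall, so callers invoke it via
 * syscall(2), with struct sched_attr laid out as in the UAPI header:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct sched_attr attr = {
 *		.size         = sizeof(attr),
 *		.sched_policy = SCHED_NORMAL,
 *		.sched_nice   = 5,
 *	};
 *
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0) == -1)
 *		perror("sched_setattr");
 */
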
1165 */ 1166 static int 1167 sched_attr_copy_to_user(struct sched_attr __user *uattr, 1168 struct sched_attr *kattr, 1169 unsigned int usize) 1170 { 1171 unsigned int ksize = sizeof(*kattr); 1172 1173 if (!access_ok(uattr, usize)) 1174 return -EFAULT; 1175 1176 /* 1177 * sched_getattr() ABI forwards and backwards compatibility: 1178 * 1179 * If usize == ksize then we just copy everything to user-space and all is good. 1180 * 1181 * If usize < ksize then we only copy as much as user-space has space for, 1182 * this keeps ABI compatibility as well. We skip the rest. 1183 * 1184 * If usize > ksize then user-space is using a newer version of the ABI, 1185 * which part the kernel doesn't know about. Just ignore it - tooling can 1186 * detect the kernel's knowledge of attributes from the attr->size value 1187 * which is set to ksize in this case. 1188 */ 1189 kattr->size = min(usize, ksize); 1190 1191 if (copy_to_user(uattr, kattr, kattr->size)) 1192 return -EFAULT; 1193 1194 return 0; 1195 } 1196 1197 /** 1198 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 1199 * @pid: the pid in question. 1200 * @uattr: structure containing the extended parameters. 1201 * @usize: sizeof(attr) for fwd/bwd comp. 1202 * @flags: for future extension. 1203 */ 1204 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 1205 unsigned int, usize, unsigned int, flags) 1206 { 1207 struct sched_attr kattr = { }; 1208 struct task_struct *p; 1209 int retval; 1210 1211 if (!uattr || pid < 0 || usize > PAGE_SIZE || 1212 usize < SCHED_ATTR_SIZE_VER0 || flags) 1213 return -EINVAL; 1214 1215 scoped_guard (rcu) { 1216 p = find_process_by_pid(pid); 1217 if (!p) 1218 return -ESRCH; 1219 1220 retval = security_task_getscheduler(p); 1221 if (retval) 1222 return retval; 1223 1224 kattr.sched_policy = p->policy; 1225 if (p->sched_reset_on_fork) 1226 kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 1227 get_params(p, &kattr); 1228 kattr.sched_flags &= SCHED_FLAG_ALL; 1229 1230 #ifdef CONFIG_UCLAMP_TASK 1231 /* 1232 * This could race with another potential updater, but this is fine 1233 * because it'll correctly read the old or the new value. We don't need 1234 * to guarantee who wins the race as long as it doesn't return garbage. 1235 */ 1236 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; 1237 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; 1238 #endif 1239 } 1240 1241 return sched_attr_copy_to_user(uattr, &kattr, usize); 1242 } 1243 1244 #ifdef CONFIG_SMP 1245 int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) 1246 { 1247 /* 1248 * If the task isn't a deadline task or admission control is 1249 * disabled then we don't care about affinity changes. 1250 */ 1251 if (!task_has_dl_policy(p) || !dl_bandwidth_enabled()) 1252 return 0; 1253 1254 /* 1255 * Since bandwidth control happens on root_domain basis, 1256 * if admission test is enabled, we only admit -deadline 1257 * tasks allowed to run on all the CPUs in the task's 1258 * root_domain. 
1259 */ 1260 guard(rcu)(); 1261 if (!cpumask_subset(task_rq(p)->rd->span, mask)) 1262 return -EBUSY; 1263 1264 return 0; 1265 } 1266 #endif /* CONFIG_SMP */ 1267 1268 int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx) 1269 { 1270 int retval; 1271 cpumask_var_t cpus_allowed, new_mask; 1272 1273 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) 1274 return -ENOMEM; 1275 1276 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 1277 retval = -ENOMEM; 1278 goto out_free_cpus_allowed; 1279 } 1280 1281 cpuset_cpus_allowed(p, cpus_allowed); 1282 cpumask_and(new_mask, ctx->new_mask, cpus_allowed); 1283 1284 ctx->new_mask = new_mask; 1285 ctx->flags |= SCA_CHECK; 1286 1287 retval = dl_task_check_affinity(p, new_mask); 1288 if (retval) 1289 goto out_free_new_mask; 1290 1291 retval = __set_cpus_allowed_ptr(p, ctx); 1292 if (retval) 1293 goto out_free_new_mask; 1294 1295 cpuset_cpus_allowed(p, cpus_allowed); 1296 if (!cpumask_subset(new_mask, cpus_allowed)) { 1297 /* 1298 * We must have raced with a concurrent cpuset update. 1299 * Just reset the cpumask to the cpuset's cpus_allowed. 1300 */ 1301 cpumask_copy(new_mask, cpus_allowed); 1302 1303 /* 1304 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr() 1305 * will restore the previous user_cpus_ptr value. 1306 * 1307 * In the unlikely event a previous user_cpus_ptr exists, 1308 * we need to further restrict the mask to what is allowed 1309 * by that old user_cpus_ptr. 1310 */ 1311 if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) { 1312 bool empty = !cpumask_and(new_mask, new_mask, 1313 ctx->user_mask); 1314 1315 if (WARN_ON_ONCE(empty)) 1316 cpumask_copy(new_mask, cpus_allowed); 1317 } 1318 __set_cpus_allowed_ptr(p, ctx); 1319 retval = -EINVAL; 1320 } 1321 1322 out_free_new_mask: 1323 free_cpumask_var(new_mask); 1324 out_free_cpus_allowed: 1325 free_cpumask_var(cpus_allowed); 1326 return retval; 1327 } 1328 1329 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 1330 { 1331 struct affinity_context ac; 1332 struct cpumask *user_mask; 1333 int retval; 1334 1335 CLASS(find_get_task, p)(pid); 1336 if (!p) 1337 return -ESRCH; 1338 1339 if (p->flags & PF_NO_SETAFFINITY) 1340 return -EINVAL; 1341 1342 if (!check_same_owner(p)) { 1343 guard(rcu)(); 1344 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) 1345 return -EPERM; 1346 } 1347 1348 retval = security_task_setscheduler(p); 1349 if (retval) 1350 return retval; 1351 1352 /* 1353 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and 1354 * alloc_user_cpus_ptr() returns NULL. 1355 */ 1356 user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE); 1357 if (user_mask) { 1358 cpumask_copy(user_mask, in_mask); 1359 } else if (IS_ENABLED(CONFIG_SMP)) { 1360 return -ENOMEM; 1361 } 1362 1363 ac = (struct affinity_context){ 1364 .new_mask = in_mask, 1365 .user_mask = user_mask, 1366 .flags = SCA_USER, 1367 }; 1368 1369 retval = __sched_setaffinity(p, &ac); 1370 kfree(ac.user_mask); 1371 1372 return retval; 1373 } 1374 1375 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 1376 struct cpumask *new_mask) 1377 { 1378 if (len < cpumask_size()) 1379 cpumask_clear(new_mask); 1380 else if (len > cpumask_size()) 1381 len = cpumask_size(); 1382 1383 return copy_from_user(new_mask, user_mask_ptr, len) ? 
/**
 * sys_sched_setaffinity - set the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new CPU mask
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval == 0)
		retval = sched_setaffinity(pid, new_mask);
	free_cpumask_var(new_mask);
	return retval;
}

long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
	struct task_struct *p;
	int retval;

	guard(rcu)();
	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	retval = security_task_getscheduler(p);
	if (retval)
		return retval;

	guard(raw_spinlock_irqsave)(&p->pi_lock);
	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);

	return 0;
}

/**
 * sys_sched_getaffinity - get the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current CPU mask
 *
 * Return: size of CPU mask copied to user_mask_ptr on success. An
 * error code otherwise.
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(unsigned long)-1))
		return -EINVAL;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		unsigned int retlen = min(len, cpumask_size());

		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}

static void do_sched_yield(void)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = this_rq_lock_irq(&rf);

	schedstat_inc(rq->yld_count);
	current->sched_class->yield_task(rq);

	preempt_disable();
	rq_unlock_irq(rq, &rf);
	sched_preempt_enable_no_resched();

	schedule();
}

/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 *
 * Return: 0.
 */
SYSCALL_DEFINE0(sched_yield)
{
	do_sched_yield();
	return 0;
}

/**
 * yield - yield the current processor to other threads.
 *
 * Do not ever use this function, there's a 99% chance you're doing it wrong.
 *
 * The scheduler is at all times free to pick the calling task as the most
 * eligible task to run, if removing the yield() call from your code breaks
 * it, it's already broken.
 *
 * Typical broken usage is:
 *
 * while (!event)
 *	yield();
 *
 * where one assumes that yield() will let 'the other' process run that will
 * make event true.
 * If the current task is a SCHED_FIFO task, that will never happen.
 * Never use yield() as a progress guarantee!!
 *
 * If you want to use yield() to wait for something, use wait_event().
 * If you want to use yield() to be 'nice' for others, use cond_resched().
 * If you still want to use yield(), do not!
 */
void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	do_sched_yield();
}
EXPORT_SYMBOL(yield);

/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 * @p: target task
 * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Return:
 *	true (>0) if we indeed boosted the target task.
 *	false (0) if we failed to boost the target.
 *	-ESRCH if there's no task to yield to.
 */
int __sched yield_to(struct task_struct *p, bool preempt)
{
	struct task_struct *curr = current;
	struct rq *rq, *p_rq;
	int yielded = 0;

	scoped_guard (irqsave) {
		rq = this_rq();

again:
		p_rq = task_rq(p);
		/*
		 * If we're the only runnable task on the rq and target rq also
		 * has only one task, there's absolutely no point in yielding.
		 */
		if (rq->nr_running == 1 && p_rq->nr_running == 1)
			return -ESRCH;

		guard(double_rq_lock)(rq, p_rq);
		if (task_rq(p) != p_rq)
			goto again;

		if (!curr->sched_class->yield_to_task)
			return 0;

		if (curr->sched_class != p->sched_class)
			return 0;

		if (task_on_cpu(p_rq, p) || !task_is_running(p))
			return 0;

		yielded = curr->sched_class->yield_to_task(rq, p);
		if (yielded) {
			schedstat_inc(rq->yld_count);
			/*
			 * Make p's CPU reschedule; pick_next_entity
			 * takes care of fairness.
			 */
			if (preempt && rq != p_rq)
				resched_curr(p_rq);
		}
	}

	if (yielded)
		schedule();

	return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);

/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the maximum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = MAX_RT_PRIO-1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
		ret = 0;
		break;
	}
	return ret;
}

/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the minimum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
1626 */ 1627 SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 1628 { 1629 int ret = -EINVAL; 1630 1631 switch (policy) { 1632 case SCHED_FIFO: 1633 case SCHED_RR: 1634 ret = 1; 1635 break; 1636 case SCHED_DEADLINE: 1637 case SCHED_NORMAL: 1638 case SCHED_BATCH: 1639 case SCHED_IDLE: 1640 ret = 0; 1641 } 1642 return ret; 1643 } 1644 1645 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) 1646 { 1647 unsigned int time_slice = 0; 1648 int retval; 1649 1650 if (pid < 0) 1651 return -EINVAL; 1652 1653 scoped_guard (rcu) { 1654 struct task_struct *p = find_process_by_pid(pid); 1655 if (!p) 1656 return -ESRCH; 1657 1658 retval = security_task_getscheduler(p); 1659 if (retval) 1660 return retval; 1661 1662 scoped_guard (task_rq_lock, p) { 1663 struct rq *rq = scope.rq; 1664 if (p->sched_class->get_rr_interval) 1665 time_slice = p->sched_class->get_rr_interval(rq, p); 1666 } 1667 } 1668 1669 jiffies_to_timespec64(time_slice, t); 1670 return 0; 1671 } 1672 1673 /** 1674 * sys_sched_rr_get_interval - return the default time-slice of a process. 1675 * @pid: pid of the process. 1676 * @interval: userspace pointer to the time-slice value. 1677 * 1678 * this syscall writes the default time-slice value of a given process 1679 * into the user-space timespec buffer. A value of '0' means infinity. 1680 * 1681 * Return: On success, 0 and the time-slice is in @interval. Otherwise, 1682 * an error code. 1683 */ 1684 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 1685 struct __kernel_timespec __user *, interval) 1686 { 1687 struct timespec64 t; 1688 int retval = sched_rr_get_interval(pid, &t); 1689 1690 if (retval == 0) 1691 retval = put_timespec64(&t, interval); 1692 1693 return retval; 1694 } 1695 1696 #ifdef CONFIG_COMPAT_32BIT_TIME 1697 SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid, 1698 struct old_timespec32 __user *, interval) 1699 { 1700 struct timespec64 t; 1701 int retval = sched_rr_get_interval(pid, &t); 1702 1703 if (retval == 0) 1704 retval = put_old_timespec32(&t, interval); 1705 return retval; 1706 } 1707 #endif 1708