// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/syscalls.c
 *
 *  Core kernel scheduler syscalls related code
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
 */
#include <linux/sched.h>
#include <linux/cpuset.h>
#include <linux/sched/debug.h>

#include <uapi/linux/sched/types.h>

#include "sched.h"
#include "autogroup.h"

static inline int __normal_prio(int policy, int rt_prio, int nice)
{
	int prio;

	if (dl_policy(policy))
		prio = MAX_DL_PRIO - 1;
	else if (rt_policy(policy))
		prio = MAX_RT_PRIO - 1 - rt_prio;
	else
		prio = NICE_TO_PRIO(nice);

	return prio;
}

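/*
 * For illustration, a few worked values of __normal_prio() (assuming
 * MAX_RT_PRIO == 100, MAX_DL_PRIO == 0 and NICE_TO_PRIO(nice) == 120 + nice,
 * as defined by the current headers):
 *
 *	__normal_prio(SCHED_NORMAL,    0,   0) -> 120
 *	__normal_prio(SCHED_NORMAL,    0, -20) -> 100
 *	__normal_prio(SCHED_FIFO,     50,   0) ->  49	(MAX_RT_PRIO - 1 - rt_prio)
 *	__normal_prio(SCHED_DEADLINE,  0,   0) ->  -1	(MAX_DL_PRIO - 1)
 *
 * A lower numerical value means a higher scheduling priority.
 */
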
/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

void set_user_nice(struct task_struct *p, long nice)
{
	bool queued, running;
	struct rq *rq;
	int old_prio;

	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
		return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
	CLASS(task_rq_lock, rq_guard)(p);
	rq = rq_guard.rq;

	update_rq_clock(rq);

	/*
	 * The RT priorities are set via sched_setscheduler(), but we still
	 * allow the 'normal' nice value to be set - but as expected
	 * it won't have any effect on scheduling while the task remains
	 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
	 */
	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
		p->static_prio = NICE_TO_PRIO(nice);
		return;
	}

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);
	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	if (running)
		put_prev_task(rq, p);

	p->static_prio = NICE_TO_PRIO(nice);
	set_load_weight(p, true);
	old_prio = p->prio;
	p->prio = effective_prio(p);

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);

	/*
	 * If the task increased its priority or is running and
	 * lowered its priority, then reschedule its CPU:
	 */
	p->sched_class->prio_changed(rq, p, old_prio);
}
EXPORT_SYMBOL(set_user_nice);

/*
 * is_nice_reduction - check if nice value is an actual reduction
 *
 * Similar to can_nice() but does not perform a capability check.
 *
 * @p: task
 * @nice: nice value
 */
static bool is_nice_reduction(const struct task_struct *p, const int nice)
{
	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
	int nice_rlim = nice_to_rlimit(nice);

	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
}

/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
int can_nice(const struct task_struct *p, const int nice)
{
	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
}

#ifdef __ARCH_WANT_SYS_NICE

/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
	long nice, retval;

	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
	nice = task_nice(current) + increment;

	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
	if (increment < 0 && !can_nice(current, nice))
		return -EPERM;

	retval = security_task_setnice(current, nice);
	if (retval)
		return retval;

	set_user_nice(current, nice);
	return 0;
}

#endif

/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * Return: The priority value as seen by users in /proc.
 *
 * sched policy         return value   kernel prio    user prio/nice
 *
 * normal, batch, idle     [0 ... 39]  [100 ... 139]  0/[-20 ... 19]
 * fifo, rr             [-2 ... -100]     [98 ... 0]  [1 ... 99]
 * deadline                     -101             -1   0
 */
int task_prio(const struct task_struct *p)
{
	return p->prio - MAX_RT_PRIO;
}

/**
 * idle_cpu - is a given CPU idle currently?
 * @cpu: the processor in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (rq->curr != rq->idle)
		return 0;

	if (rq->nr_running)
		return 0;

#ifdef CONFIG_SMP
	if (rq->ttwu_pending)
		return 0;
#endif

	return 1;
}

/**
 * available_idle_cpu - is a given CPU idle for enqueuing work.
 * @cpu: the CPU in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int available_idle_cpu(int cpu)
{
	if (!idle_cpu(cpu))
		return 0;

	if (vcpu_is_preempted(cpu))
		return 0;

	return 1;
}

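/*
 * Illustrative only: a caller that wants to place work on a CPU that is idle
 * right now (and not preempted when running as a vCPU) could scan the online
 * CPUs with available_idle_cpu(). The result is merely a hint -- the CPU may
 * become busy again immediately after the check:
 *
 *	int cpu, target = -1;
 *
 *	for_each_online_cpu(cpu) {
 *		if (available_idle_cpu(cpu)) {
 *			target = cpu;
 *			break;
 *		}
 *	}
 */
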
/**
 * idle_task - return the idle task for a given CPU.
 * @cpu: the processor in question.
 *
 * Return: The idle task for the CPU @cpu.
 */
struct task_struct *idle_task(int cpu)
{
	return cpu_rq(cpu)->idle;
}

#ifdef CONFIG_SCHED_CORE
int sched_core_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (sched_core_enabled(rq) && rq->curr == rq->idle)
		return 1;

	return idle_cpu(cpu);
}

#endif

#ifdef CONFIG_SMP
/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *   cpu_util_{cfs,rt,dl,irq}()
 *   cpu_bw_dl()
 *
 * Where the cfs,rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
 * which excludes things like IRQ and steal-time. These latter are then accrued
 * in the IRQ utilization.
 *
 * The DL bandwidth number OTOH is not a measured metric but a value computed
 * based on the task model parameters and gives the minimal utilization
 * required to meet deadlines.
 */
unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
				 unsigned long *min,
				 unsigned long *max)
{
	unsigned long util, irq, scale;
	struct rq *rq = cpu_rq(cpu);

	scale = arch_scale_cpu_capacity(cpu);

	/*
	 * Early check to see if IRQ/steal time saturates the CPU, can be
	 * because of inaccuracies in how we track these -- see
	 * update_irq_load_avg().
	 */
	irq = cpu_util_irq(rq);
	if (unlikely(irq >= scale)) {
		if (min)
			*min = scale;
		if (max)
			*max = scale;
		return scale;
	}

	if (min) {
		/*
		 * The minimum utilization returns the highest level between:
		 * - the computed DL bandwidth needed with the IRQ pressure which
		 *   steals time from the deadline task.
		 * - The minimum performance requirement for CFS and/or RT.
		 */
		*min = max(irq + cpu_bw_dl(rq), uclamp_rq_get(rq, UCLAMP_MIN));

		/*
		 * When an RT task is runnable and uclamp is not used, we must
		 * ensure that the task will run at maximum compute capacity.
		 */
		if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt))
			*min = max(*min, scale);
	}

	/*
	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
	 * CFS tasks and we use the same metric to track the effective
	 * utilization (PELT windows are synchronized) we can directly add them
	 * to obtain the CPU's actual utilization.
	 */
	util = util_cfs + cpu_util_rt(rq);
	util += cpu_util_dl(rq);

	/*
	 * The maximum hint is a soft bandwidth requirement, which can be lower
	 * than the actual utilization because of uclamp_max requirements.
	 */
	if (max)
		*max = min(scale, uclamp_rq_get(rq, UCLAMP_MAX));

	if (util >= scale)
		return scale;

	/*
	 * There is still idle time; further improve the number by using the
	 * IRQ metric. Because IRQ/steal time is hidden from the task clock we
	 * need to scale the task numbers:
	 *
	 *              max - irq
	 *   U' = irq + --------- * U
	 *                 max
	 */
	util = scale_irq_capacity(util, irq, scale);
	util += irq;

	return min(scale, util);
}

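/*
 * Worked example for the scaling above (illustrative numbers only): with
 * scale = 1024, irq = 128 and a summed cfs+rt+dl utilization U = 512:
 *
 *	U' = irq + (scale - irq) / scale * U
 *	   = 128 + (1024 - 128) * 512 / 1024
 *	   = 128 + 448
 *	   = 576
 *
 * i.e. the task-clock based utilization is compressed into the capacity
 * that remains once IRQ/steal time has been accounted for.
 */
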
unsigned long sched_cpu_util(int cpu)
{
	return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL);
}
#endif /* CONFIG_SMP */

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 *
 * The task of @pid, if found. %NULL otherwise.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}

static struct task_struct *find_get_task(pid_t pid)
{
	struct task_struct *p;
	guard(rcu)();

	p = find_process_by_pid(pid);
	if (likely(p))
		get_task_struct(p);

	return p;
}

DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
	     find_get_task(pid), pid_t pid)

/*
 * sched_setparam() passes in -1 for its policy, to let the functions
 * it calls know not to change it.
 */
#define SETPARAM_POLICY	-1

static void __setscheduler_params(struct task_struct *p,
		const struct sched_attr *attr)
{
	int policy = attr->sched_policy;

	if (policy == SETPARAM_POLICY)
		policy = p->policy;

	p->policy = policy;

	if (dl_policy(policy))
		__setparam_dl(p, attr);
	else if (fair_policy(policy))
		p->static_prio = NICE_TO_PRIO(attr->sched_nice);

	/*
	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
	 * !rt_policy. Always setting this ensures that things like
	 * getparam()/getattr() don't report silly values for !rt tasks.
	 */
	p->rt_priority = attr->sched_priority;
	p->normal_prio = normal_prio(p);
	set_load_weight(p, true);
}

/*
 * Check the target process has a UID that matches the current process's:
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	guard(rcu)();

	pcred = __task_cred(p);
	return (uid_eq(cred->euid, pcred->euid) ||
		uid_eq(cred->euid, pcred->uid));
}

#ifdef CONFIG_UCLAMP_TASK

static int uclamp_validate(struct task_struct *p,
			   const struct sched_attr *attr)
{
	int util_min = p->uclamp_req[UCLAMP_MIN].value;
	int util_max = p->uclamp_req[UCLAMP_MAX].value;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
		util_min = attr->sched_util_min;

		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
		util_max = attr->sched_util_max;

		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (util_min != -1 && util_max != -1 && util_min > util_max)
		return -EINVAL;

	/*
	 * We have valid uclamp attributes; make sure uclamp is enabled.
	 *
	 * We need to do that here, because enabling static branches is a
	 * blocking operation which obviously cannot be done while holding
	 * scheduler locks.
	 */
	static_branch_enable(&sched_uclamp_used);

	return 0;
}

static bool uclamp_reset(const struct sched_attr *attr,
			 enum uclamp_id clamp_id,
			 struct uclamp_se *uc_se)
{
	/* Reset on sched class change for a non user-defined clamp value. */
	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
	    !uc_se->user_defined)
		return true;

	/* Reset on sched_util_{min,max} == -1. */
	if (clamp_id == UCLAMP_MIN &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min == -1) {
		return true;
	}

	if (clamp_id == UCLAMP_MAX &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max == -1) {
		return true;
	}

	return false;
}

static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr)
{
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id) {
		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
		unsigned int value;

		if (!uclamp_reset(attr, clamp_id, uc_se))
			continue;

		/*
		 * RT tasks by default have a 100% boost value that could be
		 * modified at runtime.
		 */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			value = sysctl_sched_uclamp_util_min_rt_default;
		else
			value = uclamp_none(clamp_id);

		uclamp_se_set(uc_se, value, false);

	}

	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
		return;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
			      attr->sched_util_min, true);
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
			      attr->sched_util_max, true);
	}
}

#else /* !CONFIG_UCLAMP_TASK: */

static inline int uclamp_validate(struct task_struct *p,
				  const struct sched_attr *attr)
{
	return -EOPNOTSUPP;
}
static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr) { }
#endif

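/*
 * The clamp values above are normally driven from user space through
 * sched_setattr(). A minimal, illustrative user-space call that only adjusts
 * the clamps of the calling thread while keeping its policy and parameters
 * (field and flag names as in the UAPI struct sched_attr; requires
 * CONFIG_UCLAMP_TASK, otherwise uclamp_validate() returns -EOPNOTSUPP):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_flags	= SCHED_FLAG_KEEP_ALL |
 *				  SCHED_FLAG_UTIL_CLAMP_MIN |
 *				  SCHED_FLAG_UTIL_CLAMP_MAX,
 *		.sched_util_min	= 0,
 *		.sched_util_max	= 512,
 *	};
 *
 *	syscall(SYS_sched_setattr, 0, &attr, 0);
 */
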
/*
 * Allow unprivileged RT tasks to decrease priority.
 * Only issue a capable test if needed and only once to avoid an audit
 * event on permitted non-privileged operations:
 */
static int user_check_sched_setscheduler(struct task_struct *p,
					 const struct sched_attr *attr,
					 int policy, int reset_on_fork)
{
	if (fair_policy(policy)) {
		if (attr->sched_nice < task_nice(p) &&
		    !is_nice_reduction(p, attr->sched_nice))
			goto req_priv;
	}

	if (rt_policy(policy)) {
		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);

		/* Can't set/change the rt policy: */
		if (policy != p->policy && !rlim_rtprio)
			goto req_priv;

		/* Can't increase priority: */
		if (attr->sched_priority > p->rt_priority &&
		    attr->sched_priority > rlim_rtprio)
			goto req_priv;
	}

	/*
	 * Can't set/change SCHED_DEADLINE policy at all for now
	 * (safest behavior); in the future we would like to allow
	 * unprivileged DL tasks to increase their relative deadline
	 * or reduce their runtime (both ways reducing utilization)
	 */
	if (dl_policy(policy))
		goto req_priv;

	/*
	 * Treat SCHED_IDLE as nice 20. Only allow a switch to
	 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
	 */
	if (task_has_idle_policy(p) && !idle_policy(policy)) {
		if (!is_nice_reduction(p, task_nice(p)))
			goto req_priv;
	}

	/* Can't change other user's priorities: */
	if (!check_same_owner(p))
		goto req_priv;

	/* Normal users shall not reset the sched_reset_on_fork flag: */
	if (p->sched_reset_on_fork && !reset_on_fork)
		goto req_priv;

	return 0;

req_priv:
	if (!capable(CAP_SYS_NICE))
		return -EPERM;

	return 0;
}

int __sched_setscheduler(struct task_struct *p,
			 const struct sched_attr *attr,
			 bool user, bool pi)
{
	int oldpolicy = -1, policy = attr->sched_policy;
	int retval, oldprio, newprio, queued, running;
	const struct sched_class *prev_class;
	struct balance_callback *head;
	struct rq_flags rf;
	int reset_on_fork;
	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
	struct rq *rq;
	bool cpuset_locked = false;

	/* The pi code expects interrupts enabled */
	BUG_ON(pi && in_interrupt());
recheck:
	/* Double check policy once rq lock held: */
	if (policy < 0) {
		reset_on_fork = p->sched_reset_on_fork;
		policy = oldpolicy = p->policy;
	} else {
		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);

		if (!valid_policy(policy))
			return -EINVAL;
	}

	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
		return -EINVAL;

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
	 * SCHED_BATCH and SCHED_IDLE is 0.
	 */
	if (attr->sched_priority > MAX_RT_PRIO-1)
		return -EINVAL;
	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
	    (rt_policy(policy) != (attr->sched_priority != 0)))
		return -EINVAL;

	if (user) {
		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
		if (retval)
			return retval;

		if (attr->sched_flags & SCHED_FLAG_SUGOV)
			return -EINVAL;

		retval = security_task_setscheduler(p);
		if (retval)
			return retval;
	}

	/* Update task specific "requested" clamps */
	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
		retval = uclamp_validate(p, attr);
		if (retval)
			return retval;
	}

	/*
	 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
	 * information.
	 */
	if (dl_policy(policy) || dl_policy(p->policy)) {
		cpuset_locked = true;
		cpuset_lock();
	}

	/*
	 * Make sure no PI-waiters arrive (or leave) while we are
	 * changing the priority of the task:
	 *
	 * To be able to change p->policy safely, the appropriate
	 * runqueue lock must be held.
	 */
	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);

	/*
	 * Changing the policy of the stop threads is a very bad idea:
	 */
	if (p == rq->stop) {
		retval = -EINVAL;
		goto unlock;
	}

	/*
	 * If not changing anything there's no need to proceed further,
	 * but store a possible modification of reset_on_fork.
	 */
	if (unlikely(policy == p->policy)) {
		if (fair_policy(policy) && attr->sched_nice != task_nice(p))
			goto change;
		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
			goto change;
		if (dl_policy(policy) && dl_param_changed(p, attr))
			goto change;
		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
			goto change;

		p->sched_reset_on_fork = reset_on_fork;
		retval = 0;
		goto unlock;
	}
change:

	if (user) {
#ifdef CONFIG_RT_GROUP_SCHED
		/*
		 * Do not allow real-time tasks into groups that have no runtime
		 * assigned.
		 */
		if (rt_bandwidth_enabled() && rt_policy(policy) &&
				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
				!task_group_is_autogroup(task_group(p))) {
			retval = -EPERM;
			goto unlock;
		}
#endif
#ifdef CONFIG_SMP
		if (dl_bandwidth_enabled() && dl_policy(policy) &&
				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
			cpumask_t *span = rq->rd->span;

			/*
			 * Don't allow tasks with an affinity mask smaller than
			 * the entire root_domain to become SCHED_DEADLINE. We
			 * will also fail if there's no bandwidth available.
			 */
			if (!cpumask_subset(span, p->cpus_ptr) ||
			    rq->rd->dl_bw.bw == 0) {
				retval = -EPERM;
				goto unlock;
			}
		}
#endif
	}

	/* Re-check policy now with rq lock held: */
	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
		policy = oldpolicy = -1;
		task_rq_unlock(rq, p, &rf);
		if (cpuset_locked)
			cpuset_unlock();
		goto recheck;
	}

	/*
	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
	 * is available.
	 */
	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
		retval = -EBUSY;
		goto unlock;
	}

	p->sched_reset_on_fork = reset_on_fork;
	oldprio = p->prio;

	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
	if (pi) {
		/*
		 * Take priority boosted tasks into account. If the new
		 * effective priority is unchanged, we just store the new
		 * normal parameters and do not touch the scheduler class and
		 * the runqueue. This will be done when the task deboosts
		 * itself.
		 */
		newprio = rt_effective_prio(p, newprio);
		if (newprio == oldprio)
			queue_flags &= ~DEQUEUE_MOVE;
	}

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);
	if (queued)
		dequeue_task(rq, p, queue_flags);
	if (running)
		put_prev_task(rq, p);

	prev_class = p->sched_class;

	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
		__setscheduler_params(p, attr);
		__setscheduler_prio(p, newprio);
	}
	__setscheduler_uclamp(p, attr);
	check_class_changing(rq, p, prev_class);

	if (queued) {
		/*
		 * We enqueue to tail when the priority of a task is
		 * increased (user space view).
		 */
		if (oldprio < p->prio)
			queue_flags |= ENQUEUE_HEAD;

		enqueue_task(rq, p, queue_flags);
	}
	if (running)
		set_next_task(rq, p);

	check_class_changed(rq, p, prev_class, oldprio);

	/* Avoid rq from going away on us: */
	preempt_disable();
	head = splice_balance_callbacks(rq);
	task_rq_unlock(rq, p, &rf);

	if (pi) {
		if (cpuset_locked)
			cpuset_unlock();
		rt_mutex_adjust_pi(p);
	}

	/* Run balance callbacks after we've adjusted the PI chain: */
	balance_callbacks(rq, head);
	preempt_enable();

	return 0;

unlock:
	task_rq_unlock(rq, p, &rf);
	if (cpuset_locked)
		cpuset_unlock();
	return retval;
}

static int _sched_setscheduler(struct task_struct *p, int policy,
			       const struct sched_param *param, bool check)
{
	struct sched_attr attr = {
		.sched_policy   = policy,
		.sched_priority = param->sched_priority,
		.sched_nice	= PRIO_TO_NICE(p->static_prio),
	};

	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
		policy &= ~SCHED_RESET_ON_FORK;
		attr.sched_policy = policy;
	}

	return __sched_setscheduler(p, &attr, check, true);
}
/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Use sched_set_fifo(), read its comment.
 *
 * Return: 0 on success. An error code otherwise.
 *
 * NOTE that the task may be already dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
		       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, true);
}

int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, true, true);
}

int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, false, true);
}
EXPORT_SYMBOL_GPL(sched_setattr_nocheck);

/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernel-space.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission. For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 *
 * Return: 0 on success. An error code otherwise.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
			       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, false);
}

/*
 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
 * incapable of resource management, which is the one thing an OS really should
 * be doing.
 *
 * This is of course the reason it is limited to privileged users only.
 *
 * Worse still; it is fundamentally impossible to compose static priority
 * workloads. You cannot take two correctly working static prio workloads
 * and smash them together and still expect them to work.
 *
 * For this reason 'all' FIFO tasks the kernel creates are basically at:
 *
 *   MAX_RT_PRIO / 2
 *
 * The administrator _MUST_ configure the system, the kernel simply doesn't
 * know enough information to make a sensible choice.
 */
void sched_set_fifo(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo);

/*
 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
 */
void sched_set_fifo_low(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = 1 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo_low);

void sched_set_normal(struct task_struct *p, int nice)
{
	struct sched_attr attr = {
		.sched_policy = SCHED_NORMAL,
		.sched_nice = nice,
	};
	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_normal);

static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
	struct sched_param lparam;

	if (!param || pid < 0)
		return -EINVAL;
	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
		return -EFAULT;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	return sched_setscheduler(p, policy, &lparam);
}

/*
 * Mimics kernel/events/core.c perf_copy_attr().
 */
static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
{
	u32 size;
	int ret;

	/* Zero the full structure, so that a short copy will be nice: */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	/* ABI compatibility quirk: */
	if (!size)
		size = SCHED_ATTR_SIZE_VER0;
	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
		goto err_size;

	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
	if (ret) {
		if (ret == -E2BIG)
			goto err_size;
		return ret;
	}

	if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
	    size < SCHED_ATTR_SIZE_VER1)
		return -EINVAL;

	/*
	 * XXX: Do we want to be lenient like existing syscalls; or do we want
	 * to be strict and return an error on out-of-bounds values?
	 */
	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);

	return 0;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	return -E2BIG;
}

static void get_params(struct task_struct *p, struct sched_attr *attr)
{
	if (task_has_dl_policy(p))
		__getparam_dl(p, attr);
	else if (task_has_rt_policy(p))
		attr->sched_priority = p->rt_priority;
	else
		attr->sched_nice = task_nice(p);
}

/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
{
	if (policy < 0)
		return -EINVAL;

	return do_sched_setscheduler(pid, policy, param);
}

/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
}

/**
 * sys_sched_setattr - same as above, but with extended sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @flags: for future extension.
 */
SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
			       unsigned int, flags)
{
	struct sched_attr attr;
	int retval;

	if (!uattr || pid < 0 || flags)
		return -EINVAL;

	retval = sched_copy_attr(uattr, &attr);
	if (retval)
		return retval;

	if ((int)attr.sched_policy < 0)
		return -EINVAL;
	if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
		attr.sched_policy = SETPARAM_POLICY;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
		get_params(p, &attr);

	return sched_setattr(p, &attr);
}

/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 *
 * Return: On success, the policy of the thread. Otherwise, a negative error
 * code.
 */
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
	struct task_struct *p;
	int retval;

	if (pid < 0)
		return -EINVAL;

	guard(rcu)();
	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	retval = security_task_getscheduler(p);
	if (!retval) {
		retval = p->policy;
		if (p->sched_reset_on_fork)
			retval |= SCHED_RESET_ON_FORK;
	}
	return retval;
}

/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 *
 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
 * code.
 */
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
	struct sched_param lp = { .sched_priority = 0 };
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;

	scoped_guard (rcu) {
		p = find_process_by_pid(pid);
		if (!p)
			return -ESRCH;

		retval = security_task_getscheduler(p);
		if (retval)
			return retval;

		if (task_has_rt_policy(p))
			lp.sched_priority = p->rt_priority;
	}

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
}

/*
 * Copy the kernel size attribute structure (which might be larger
 * than what user-space knows about) to user-space.
 *
 * Note that all cases are valid: user-space buffer can be larger or
 * smaller than the kernel-space buffer. The usual case is that both
 * have the same size.
 */
static int
sched_attr_copy_to_user(struct sched_attr __user *uattr,
			struct sched_attr *kattr,
			unsigned int usize)
{
	unsigned int ksize = sizeof(*kattr);

	if (!access_ok(uattr, usize))
		return -EFAULT;

	/*
	 * sched_getattr() ABI forwards and backwards compatibility:
	 *
	 * If usize == ksize then we just copy everything to user-space and all is good.
	 *
	 * If usize < ksize then we only copy as much as user-space has space for,
	 * this keeps ABI compatibility as well. We skip the rest.
	 *
	 * If usize > ksize then user-space is using a newer version of the ABI,
	 * parts of which the kernel doesn't know about. Just ignore it - tooling can
	 * detect the kernel's knowledge of attributes from the attr->size value
	 * which is set to ksize in this case.
	 */
	kattr->size = min(usize, ksize);

	if (copy_to_user(uattr, kattr, kattr->size))
		return -EFAULT;

	return 0;
}

/**
 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @usize: sizeof(attr) for fwd/bwd comp.
 * @flags: for future extension.
 */
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
		unsigned int, usize, unsigned int, flags)
{
	struct sched_attr kattr = { };
	struct task_struct *p;
	int retval;

	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
	    usize < SCHED_ATTR_SIZE_VER0 || flags)
		return -EINVAL;

	scoped_guard (rcu) {
		p = find_process_by_pid(pid);
		if (!p)
			return -ESRCH;

		retval = security_task_getscheduler(p);
		if (retval)
			return retval;

		kattr.sched_policy = p->policy;
		if (p->sched_reset_on_fork)
			kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
		get_params(p, &kattr);
		kattr.sched_flags &= SCHED_FLAG_ALL;

#ifdef CONFIG_UCLAMP_TASK
		/*
		 * This could race with another potential updater, but this is fine
		 * because it'll correctly read the old or the new value. We don't need
		 * to guarantee who wins the race as long as it doesn't return garbage.
		 */
		kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
		kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
#endif
	}

	return sched_attr_copy_to_user(uattr, &kattr, usize);
}

#ifdef CONFIG_SMP
int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
{
	/*
	 * If the task isn't a deadline task or admission control is
	 * disabled then we don't care about affinity changes.
	 */
	if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
		return 0;

	/*
	 * Since bandwidth control happens on root_domain basis,
	 * if admission test is enabled, we only admit -deadline
	 * tasks allowed to run on all the CPUs in the task's
	 * root_domain.
	 */
	guard(rcu)();
	if (!cpumask_subset(task_rq(p)->rd->span, mask))
		return -EBUSY;

	return 0;
}
#endif /* CONFIG_SMP */

int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
{
	int retval;
	cpumask_var_t cpus_allowed, new_mask;

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
		return -ENOMEM;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}

	cpuset_cpus_allowed(p, cpus_allowed);
	cpumask_and(new_mask, ctx->new_mask, cpus_allowed);

	ctx->new_mask = new_mask;
	ctx->flags |= SCA_CHECK;

	retval = dl_task_check_affinity(p, new_mask);
	if (retval)
		goto out_free_new_mask;

	retval = __set_cpus_allowed_ptr(p, ctx);
	if (retval)
		goto out_free_new_mask;

	cpuset_cpus_allowed(p, cpus_allowed);
	if (!cpumask_subset(new_mask, cpus_allowed)) {
		/*
		 * We must have raced with a concurrent cpuset update.
		 * Just reset the cpumask to the cpuset's cpus_allowed.
		 */
		cpumask_copy(new_mask, cpus_allowed);

		/*
		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
		 * will restore the previous user_cpus_ptr value.
		 *
		 * In the unlikely event a previous user_cpus_ptr exists,
		 * we need to further restrict the mask to what is allowed
		 * by that old user_cpus_ptr.
		 */
		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
			bool empty = !cpumask_and(new_mask, new_mask,
						  ctx->user_mask);

			if (WARN_ON_ONCE(empty))
				cpumask_copy(new_mask, cpus_allowed);
		}
		__set_cpus_allowed_ptr(p, ctx);
		retval = -EINVAL;
	}

out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
	return retval;
}

long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	struct affinity_context ac;
	struct cpumask *user_mask;
	int retval;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	if (p->flags & PF_NO_SETAFFINITY)
		return -EINVAL;

	if (!check_same_owner(p)) {
		guard(rcu)();
		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
			return -EPERM;
	}

	retval = security_task_setscheduler(p);
	if (retval)
		return retval;

	/*
	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
	 * alloc_user_cpus_ptr() returns NULL.
	 */
	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
	if (user_mask) {
		cpumask_copy(user_mask, in_mask);
	} else if (IS_ENABLED(CONFIG_SMP)) {
		return -ENOMEM;
	}

	ac = (struct affinity_context){
		.new_mask  = in_mask,
		.user_mask = user_mask,
		.flags     = SCA_USER,
	};

	retval = __sched_setaffinity(p, &ac);
	kfree(ac.user_mask);

	return retval;
}

static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
			     struct cpumask *new_mask)
{
	if (len < cpumask_size())
		cpumask_clear(new_mask);
	else if (len > cpumask_size())
		len = cpumask_size();

	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}

/**
 * sys_sched_setaffinity - set the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new CPU mask
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval == 0)
		retval = sched_setaffinity(pid, new_mask);
	free_cpumask_var(new_mask);
	return retval;
}

long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
	struct task_struct *p;
	int retval;

	guard(rcu)();
	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	retval = security_task_getscheduler(p);
	if (retval)
		return retval;

	guard(raw_spinlock_irqsave)(&p->pi_lock);
	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);

	return 0;
}

/**
 * sys_sched_getaffinity - get the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current CPU mask
 *
 * Return: size of CPU mask copied to user_mask_ptr on success. An
 * error code otherwise.
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(unsigned long)-1))
		return -EINVAL;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		unsigned int retlen = min(len, cpumask_size());

		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}

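/*
 * Note for user space (illustrative): unlike most syscalls, a successful raw
 * sched_getaffinity() call returns the number of bytes copied into the buffer
 * (the kernel's cpumask size), not 0. A typical call through the glibc
 * wrapper, which hides that byte count and returns 0 on success, could look
 * like:
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	if (sched_getaffinity(0, sizeof(set), &set) == -1)
 *		perror("sched_getaffinity");
 *
 * Callers invoking the syscall directly via syscall(2) see the raw length
 * described above.
 */
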
static void do_sched_yield(void)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = this_rq_lock_irq(&rf);

	schedstat_inc(rq->yld_count);
	current->sched_class->yield_task(rq);

	preempt_disable();
	rq_unlock_irq(rq, &rf);
	sched_preempt_enable_no_resched();

	schedule();
}

/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 *
 * Return: 0.
 */
SYSCALL_DEFINE0(sched_yield)
{
	do_sched_yield();
	return 0;
}

/**
 * yield - yield the current processor to other threads.
 *
 * Do not ever use this function, there's a 99% chance you're doing it wrong.
 *
 * The scheduler is at all times free to pick the calling task as the most
 * eligible task to run, if removing the yield() call from your code breaks
 * it, it's already broken.
 *
 * Typical broken usage is:
 *
 * while (!event)
 *	yield();
 *
 * where one assumes that yield() will let 'the other' process run that will
 * make event true. If the current task is a SCHED_FIFO task that will never
 * happen. Never use yield() as a progress guarantee!!
 *
 * If you want to use yield() to wait for something, use wait_event().
 * If you want to use yield() to be 'nice' for others, use cond_resched().
 * If you still want to use yield(), do not!
 */
void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	do_sched_yield();
}
EXPORT_SYMBOL(yield);

/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 * @p: target task
 * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Return:
 *	true (>0) if we indeed boosted the target task.
 *	false (0) if we failed to boost the target.
 *	-ESRCH if there's no task to yield to.
 */
int __sched yield_to(struct task_struct *p, bool preempt)
{
	struct task_struct *curr = current;
	struct rq *rq, *p_rq;
	int yielded = 0;

	scoped_guard (irqsave) {
		rq = this_rq();

again:
		p_rq = task_rq(p);
		/*
		 * If we're the only runnable task on the rq and target rq also
		 * has only one task, there's absolutely no point in yielding.
		 */
		if (rq->nr_running == 1 && p_rq->nr_running == 1)
			return -ESRCH;

		guard(double_rq_lock)(rq, p_rq);
		if (task_rq(p) != p_rq)
			goto again;

		if (!curr->sched_class->yield_to_task)
			return 0;

		if (curr->sched_class != p->sched_class)
			return 0;

		if (task_on_cpu(p_rq, p) || !task_is_running(p))
			return 0;

		yielded = curr->sched_class->yield_to_task(rq, p);
		if (yielded) {
			schedstat_inc(rq->yld_count);
			/*
			 * Make p's CPU reschedule; pick_next_entity
			 * takes care of fairness.
			 */
			if (preempt && rq != p_rq)
				resched_curr(p_rq);
		}
	}

	if (yielded)
		schedule();

	return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);

/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the maximum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = MAX_RT_PRIO-1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
		ret = 0;
		break;
	}
	return ret;
}

/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the minimum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
		ret = 0;
	}
	return ret;
}

static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
{
	unsigned int time_slice = 0;
	int retval;

	if (pid < 0)
		return -EINVAL;

	scoped_guard (rcu) {
		struct task_struct *p = find_process_by_pid(pid);
		if (!p)
			return -ESRCH;

		retval = security_task_getscheduler(p);
		if (retval)
			return retval;

		scoped_guard (task_rq_lock, p) {
			struct rq *rq = scope.rq;
			if (p->sched_class->get_rr_interval)
				time_slice = p->sched_class->get_rr_interval(rq, p);
		}
	}

	jiffies_to_timespec64(time_slice, t);
	return 0;
}

/**
 * sys_sched_rr_get_interval - return the default time-slice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the time-slice value.
 *
 * this syscall writes the default time-slice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 *
 * Return: On success, 0 and the time-slice is in @interval. Otherwise,
 * an error code.
 */
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
		struct __kernel_timespec __user *, interval)
{
	struct timespec64 t;
	int retval = sched_rr_get_interval(pid, &t);

	if (retval == 0)
		retval = put_timespec64(&t, interval);

	return retval;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
		struct old_timespec32 __user *, interval)
{
	struct timespec64 t;
	int retval = sched_rr_get_interval(pid, &t);

	if (retval == 0)
		retval = put_old_timespec32(&t, interval);
	return retval;
}
#endif