// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/syscalls.c
 *
 *  Core kernel scheduler syscalls related code
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
 */
#include <linux/sched.h>
#include <linux/cpuset.h>
#include <linux/sched/debug.h>

#include <uapi/linux/sched/types.h>

#include "sched.h"
#include "autogroup.h"

static inline int __normal_prio(int policy, int rt_prio, int nice)
{
	int prio;

	if (dl_policy(policy))
		prio = MAX_DL_PRIO - 1;
	else if (rt_policy(policy))
		prio = MAX_RT_PRIO - 1 - rt_prio;
	else
		prio = NICE_TO_PRIO(nice);

	return prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are an RT task or were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_or_dl_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

void set_user_nice(struct task_struct *p, long nice)
{
	bool queued, running;
	struct rq *rq;
	int old_prio;

	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
		return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
	CLASS(task_rq_lock, rq_guard)(p);
	rq = rq_guard.rq;

	update_rq_clock(rq);

	/*
	 * The RT priorities are set via sched_setscheduler(), but we still
	 * allow the 'normal' nice value to be set - but as expected
	 * it won't have any effect on scheduling as long as the task is
	 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
	 */
	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
		p->static_prio = NICE_TO_PRIO(nice);
		return;
	}

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);
	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	if (running)
		put_prev_task(rq, p);

	p->static_prio = NICE_TO_PRIO(nice);
	set_load_weight(p, true);
	old_prio = p->prio;
	p->prio = effective_prio(p);

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);

	/*
	 * If the task increased its priority or is running and
	 * lowered its priority, then reschedule its CPU:
	 */
	p->sched_class->prio_changed(rq, p, old_prio);
}
EXPORT_SYMBOL(set_user_nice);
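
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * wants a worker kthread it created to run at a lower nice value might
 * do the following, where "fn", "data" and the thread name are
 * placeholders:
 *
 *	struct task_struct *t = kthread_run(fn, data, "mydrv-worker");
 *	if (!IS_ERR(t))
 *		set_user_nice(t, -10);
 */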

/*
 * is_nice_reduction - check if nice value is an actual reduction
 * @p: task
 * @nice: nice value
 *
 * Similar to can_nice() but does not perform a capability check.
 */
static bool is_nice_reduction(const struct task_struct *p, const int nice)
{
	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
	int nice_rlim = nice_to_rlimit(nice);

	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
}

/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
int can_nice(const struct task_struct *p, const int nice)
{
	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
}

#ifdef __ARCH_WANT_SYS_NICE

/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
	long nice, retval;

	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
	nice = task_nice(current) + increment;

	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
	if (increment < 0 && !can_nice(current, nice))
		return -EPERM;

	retval = security_task_setnice(current, nice);
	if (retval)
		return retval;

	set_user_nice(current, nice);
	return 0;
}

#endif

/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * Return: The priority value as seen by users in /proc.
 *
 * sched policy         return value   kernel prio    user prio/nice
 *
 * normal, batch, idle     [0 ... 39]  [100 ... 139]          0/[-20 ... 19]
 * fifo, rr             [-2 ... -100]     [98 ... 0]  [1 ... 99]
 * deadline                     -101             -1           0
 */
int task_prio(const struct task_struct *p)
{
	return p->prio - MAX_RT_PRIO;
}

/**
 * idle_cpu - is a given CPU idle currently?
 * @cpu: the processor in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (rq->curr != rq->idle)
		return 0;

	if (rq->nr_running)
		return 0;

#ifdef CONFIG_SMP
	if (rq->ttwu_pending)
		return 0;
#endif

	return 1;
}

/**
 * available_idle_cpu - is a given CPU idle for enqueuing work.
 * @cpu: the CPU in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int available_idle_cpu(int cpu)
{
	if (!idle_cpu(cpu))
		return 0;

	if (vcpu_is_preempted(cpu))
		return 0;

	return 1;
}

/**
 * idle_task - return the idle task for a given CPU.
 * @cpu: the processor in question.
 *
 * Return: The idle task for the CPU @cpu.
 */
struct task_struct *idle_task(int cpu)
{
	return cpu_rq(cpu)->idle;
}

#ifdef CONFIG_SCHED_CORE
int sched_core_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (sched_core_enabled(rq) && rq->curr == rq->idle)
		return 1;

	return idle_cpu(cpu);
}

#endif
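
/*
 * Example (illustrative sketch): a placement heuristic scanning for a
 * usable CPU would prefer available_idle_cpu() over idle_cpu(), so that
 * vCPUs whose underlying host CPU has been preempted are skipped:
 *
 *	int cpu;
 *
 *	for_each_online_cpu(cpu) {
 *		if (available_idle_cpu(cpu))
 *			return cpu;
 *	}
 *	return -1;
 */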

#ifdef CONFIG_SMP
/*
 * Load avg and utilization metrics need to be updated periodically and
 * before consumption. This function updates the metrics for all
 * subsystems except for the fair class. @rq must be locked and have its
 * clock updated.
 */
bool update_other_load_avgs(struct rq *rq)
{
	u64 now = rq_clock_pelt(rq);
	const struct sched_class *curr_class = rq->curr->sched_class;
	unsigned long hw_pressure = arch_scale_hw_pressure(cpu_of(rq));

	lockdep_assert_rq_held(rq);

	return update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
		update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
		update_hw_load_avg(now, rq, hw_pressure) |
		update_irq_load_avg(rq, 0);
}

/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *   cpu_util_{cfs,rt,dl,irq}()
 *   cpu_bw_dl()
 *
 * Where the cfs,rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
 * which excludes things like IRQ and steal-time. These latter are then accrued
 * in the IRQ utilization.
 *
 * The DL bandwidth number OTOH is not a measured metric but a value computed
 * based on the task model parameters and gives the minimal utilization
 * required to meet deadlines.
 */
unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
				 unsigned long *min,
				 unsigned long *max)
{
	unsigned long util, irq, scale;
	struct rq *rq = cpu_rq(cpu);

	scale = arch_scale_cpu_capacity(cpu);

	/*
	 * Early check to see if IRQ/steal time saturates the CPU, which can
	 * happen because of inaccuracies in how we track these -- see
	 * update_irq_load_avg().
	 */
	irq = cpu_util_irq(rq);
	if (unlikely(irq >= scale)) {
		if (min)
			*min = scale;
		if (max)
			*max = scale;
		return scale;
	}

	if (min) {
		/*
		 * The minimum utilization returns the highest level between:
		 * - the computed DL bandwidth needed with the IRQ pressure which
		 *   steals time from the deadline task.
		 * - The minimum performance requirement for CFS and/or RT.
		 */
		*min = max(irq + cpu_bw_dl(rq), uclamp_rq_get(rq, UCLAMP_MIN));

		/*
		 * When an RT task is runnable and uclamp is not used, we must
		 * ensure that the task will run at maximum compute capacity.
		 */
		if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt))
			*min = max(*min, scale);
	}

	/*
	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
	 * CFS tasks and we use the same metric to track the effective
	 * utilization (PELT windows are synchronized) we can directly add them
	 * to obtain the CPU's actual utilization.
	 */
	util = util_cfs + cpu_util_rt(rq);
	util += cpu_util_dl(rq);

	/*
	 * The maximum hint is a soft bandwidth requirement, which can be lower
	 * than the actual utilization because of uclamp_max requirements.
	 */
	if (max)
		*max = min(scale, uclamp_rq_get(rq, UCLAMP_MAX));

	if (util >= scale)
		return scale;

	/*
	 * There is still idle time; further improve the number by using the
	 * IRQ metric. Because IRQ/steal time is hidden from the task clock we
	 * need to scale the task numbers:
	 *
	 *              max - irq
	 *   U' = irq + --------- * U
	 *                 max
	 */
	util = scale_irq_capacity(util, irq, scale);
	util += irq;

	return min(scale, util);
}
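
/*
 * Worked example for the scaling above (numbers are illustrative): with
 * scale = 1024, irq = 256 and util = 512, the non-IRQ fraction of the
 * CPU is (1024 - 256) / 1024, so:
 *
 *	U' = 256 + 512 * 768 / 1024 = 256 + 384 = 640
 */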

unsigned long sched_cpu_util(int cpu)
{
	return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL);
}
#endif /* CONFIG_SMP */

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 *
 * Return: The task of @pid, if found. %NULL otherwise.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}

static struct task_struct *find_get_task(pid_t pid)
{
	struct task_struct *p;
	guard(rcu)();

	p = find_process_by_pid(pid);
	if (likely(p))
		get_task_struct(p);

	return p;
}

DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
	     find_get_task(pid), pid_t pid)

/*
 * sched_setparam() passes in -1 for its policy, to let the functions
 * it calls know not to change it.
 */
#define SETPARAM_POLICY	-1

static void __setscheduler_params(struct task_struct *p,
				  const struct sched_attr *attr)
{
	int policy = attr->sched_policy;

	if (policy == SETPARAM_POLICY)
		policy = p->policy;

	p->policy = policy;

	if (dl_policy(policy)) {
		__setparam_dl(p, attr);
	} else if (fair_policy(policy)) {
		p->static_prio = NICE_TO_PRIO(attr->sched_nice);
		if (attr->sched_runtime) {
			p->se.custom_slice = 1;
			p->se.slice = clamp_t(u64, attr->sched_runtime,
					      NSEC_PER_MSEC/10,   /* HZ=1000 * 10 */
					      NSEC_PER_MSEC*100); /* HZ=100  / 10 */
		} else {
			p->se.custom_slice = 0;
			p->se.slice = sysctl_sched_base_slice;
		}
	}

	/*
	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
	 * !rt_policy. Always setting this ensures that things like
	 * getparam()/getattr() don't report silly values for !rt tasks.
	 */
	p->rt_priority = attr->sched_priority;
	p->normal_prio = normal_prio(p);
	set_load_weight(p, true);
}
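
/*
 * Example (illustrative sketch): for a fair task, sched_runtime doubles
 * as a custom slice request, clamped to [0.1ms, 100ms] above. A caller
 * asking for a 5ms slice could pass:
 *
 *	struct sched_attr attr = {
 *		.sched_policy	= SCHED_NORMAL,
 *		.sched_nice	= 0,
 *		.sched_runtime	= 5 * NSEC_PER_MSEC,
 *	};
 */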

/*
 * Check the target process has a UID that matches the current process's:
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	guard(rcu)();

	pcred = __task_cred(p);
	return (uid_eq(cred->euid, pcred->euid) ||
		uid_eq(cred->euid, pcred->uid));
}

#ifdef CONFIG_UCLAMP_TASK

static int uclamp_validate(struct task_struct *p,
			   const struct sched_attr *attr)
{
	int util_min = p->uclamp_req[UCLAMP_MIN].value;
	int util_max = p->uclamp_req[UCLAMP_MAX].value;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
		util_min = attr->sched_util_min;

		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
		util_max = attr->sched_util_max;

		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (util_min != -1 && util_max != -1 && util_min > util_max)
		return -EINVAL;

	/*
	 * We have valid uclamp attributes; make sure uclamp is enabled.
	 *
	 * We need to do that here, because enabling static branches is a
	 * blocking operation which obviously cannot be done while holding
	 * scheduler locks.
	 */
	static_branch_enable(&sched_uclamp_used);

	return 0;
}

static bool uclamp_reset(const struct sched_attr *attr,
			 enum uclamp_id clamp_id,
			 struct uclamp_se *uc_se)
{
	/* Reset on sched class change for a non user-defined clamp value. */
	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
	    !uc_se->user_defined)
		return true;

	/* Reset on sched_util_{min,max} == -1. */
	if (clamp_id == UCLAMP_MIN &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min == -1) {
		return true;
	}

	if (clamp_id == UCLAMP_MAX &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max == -1) {
		return true;
	}

	return false;
}

static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr)
{
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id) {
		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
		unsigned int value;

		if (!uclamp_reset(attr, clamp_id, uc_se))
			continue;

		/*
		 * RT tasks by default have a 100% boost value that could be
		 * modified at runtime.
		 */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			value = sysctl_sched_uclamp_util_min_rt_default;
		else
			value = uclamp_none(clamp_id);

		uclamp_se_set(uc_se, value, false);
	}

	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
		return;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
			      attr->sched_util_min, true);
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
			      attr->sched_util_max, true);
	}
}

#else /* !CONFIG_UCLAMP_TASK: */

static inline int uclamp_validate(struct task_struct *p,
				  const struct sched_attr *attr)
{
	return -EOPNOTSUPP;
}
static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr) { }
#endif
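
/*
 * Example (illustrative sketch): limiting a background task to roughly
 * a quarter of CPU capacity via the max clamp; values are in the
 * [0, SCHED_CAPACITY_SCALE] range validated by uclamp_validate():
 *
 *	struct sched_attr attr = {
 *		.sched_policy	= SCHED_NORMAL,
 *		.sched_flags	= SCHED_FLAG_UTIL_CLAMP_MAX,
 *		.sched_util_max	= SCHED_CAPACITY_SCALE / 4,
 *	};
 *	sched_setattr(p, &attr);
 */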

/*
 * Allow unprivileged RT tasks to decrease priority.
 * Only issue a capable test if needed and only once to avoid an audit
 * event on permitted non-privileged operations:
 */
static int user_check_sched_setscheduler(struct task_struct *p,
					 const struct sched_attr *attr,
					 int policy, int reset_on_fork)
{
	if (fair_policy(policy)) {
		if (attr->sched_nice < task_nice(p) &&
		    !is_nice_reduction(p, attr->sched_nice))
			goto req_priv;
	}

	if (rt_policy(policy)) {
		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);

		/* Can't set/change the rt policy: */
		if (policy != p->policy && !rlim_rtprio)
			goto req_priv;

		/* Can't increase priority: */
		if (attr->sched_priority > p->rt_priority &&
		    attr->sched_priority > rlim_rtprio)
			goto req_priv;
	}

	/*
	 * Can't set/change SCHED_DEADLINE policy at all for now
	 * (safest behavior); in the future we would like to allow
	 * unprivileged DL tasks to increase their relative deadline
	 * or reduce their runtime (both ways reducing utilization)
	 */
	if (dl_policy(policy))
		goto req_priv;

	/*
	 * Treat SCHED_IDLE as nice 20. Only allow a switch to
	 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
	 */
	if (task_has_idle_policy(p) && !idle_policy(policy)) {
		if (!is_nice_reduction(p, task_nice(p)))
			goto req_priv;
	}

	/* Can't change other users' priorities: */
	if (!check_same_owner(p))
		goto req_priv;

	/* Normal users shall not reset the sched_reset_on_fork flag: */
	if (p->sched_reset_on_fork && !reset_on_fork)
		goto req_priv;

	return 0;

req_priv:
	if (!capable(CAP_SYS_NICE))
		return -EPERM;

	return 0;
}
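
/*
 * Worked example (illustrative): with RLIMIT_RTPRIO = 0 an unprivileged
 * task can neither switch to SCHED_FIFO/SCHED_RR nor raise an existing
 * RT priority. With RLIMIT_RTPRIO = 50 it may request priorities up to
 * 50, or keep a priority it already holds above that.
 */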

int __sched_setscheduler(struct task_struct *p,
			 const struct sched_attr *attr,
			 bool user, bool pi)
{
	int oldpolicy = -1, policy = attr->sched_policy;
	int retval, oldprio, newprio, queued, running;
	const struct sched_class *prev_class;
	struct balance_callback *head;
	struct rq_flags rf;
	int reset_on_fork;
	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
	struct rq *rq;
	bool cpuset_locked = false;

	/* The pi code expects interrupts enabled */
	BUG_ON(pi && in_interrupt());
recheck:
	/* Double check policy once rq lock held: */
	if (policy < 0) {
		reset_on_fork = p->sched_reset_on_fork;
		policy = oldpolicy = p->policy;
	} else {
		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);

		if (!valid_policy(policy))
			return -EINVAL;
	}

	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
		return -EINVAL;

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
	 * SCHED_BATCH and SCHED_IDLE is 0.
	 */
	if (attr->sched_priority > MAX_RT_PRIO-1)
		return -EINVAL;
	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
	    (rt_policy(policy) != (attr->sched_priority != 0)))
		return -EINVAL;

	if (user) {
		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
		if (retval)
			return retval;

		if (attr->sched_flags & SCHED_FLAG_SUGOV)
			return -EINVAL;

		retval = security_task_setscheduler(p);
		if (retval)
			return retval;
	}

	/* Update task specific "requested" clamps */
	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
		retval = uclamp_validate(p, attr);
		if (retval)
			return retval;
	}

	/*
	 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
	 * information.
	 */
	if (dl_policy(policy) || dl_policy(p->policy)) {
		cpuset_locked = true;
		cpuset_lock();
	}

	/*
	 * Make sure no PI-waiters arrive (or leave) while we are
	 * changing the priority of the task:
	 *
	 * To be able to change p->policy safely, the appropriate
	 * runqueue lock must be held.
	 */
	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);

	/*
	 * Changing the policy of the stop threads is a very bad idea:
	 */
	if (p == rq->stop) {
		retval = -EINVAL;
		goto unlock;
	}

	retval = scx_check_setscheduler(p, policy);
	if (retval)
		goto unlock;

	/*
	 * If not changing anything there's no need to proceed further,
	 * but store a possible modification of reset_on_fork.
	 */
	if (unlikely(policy == p->policy)) {
		if (fair_policy(policy) &&
		    (attr->sched_nice != task_nice(p) ||
		     (attr->sched_runtime != p->se.slice)))
			goto change;
		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
			goto change;
		if (dl_policy(policy) && dl_param_changed(p, attr))
			goto change;
		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
			goto change;

		p->sched_reset_on_fork = reset_on_fork;
		retval = 0;
		goto unlock;
	}
change:

	if (user) {
#ifdef CONFIG_RT_GROUP_SCHED
		/*
		 * Do not allow real-time tasks into groups that have no runtime
		 * assigned.
		 */
		if (rt_bandwidth_enabled() && rt_policy(policy) &&
				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
				!task_group_is_autogroup(task_group(p))) {
			retval = -EPERM;
			goto unlock;
		}
#endif
#ifdef CONFIG_SMP
		if (dl_bandwidth_enabled() && dl_policy(policy) &&
				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
			cpumask_t *span = rq->rd->span;

			/*
			 * Don't allow tasks with an affinity mask smaller than
			 * the entire root_domain to become SCHED_DEADLINE. We
			 * will also fail if there's no bandwidth available.
			 */
			if (!cpumask_subset(span, p->cpus_ptr) ||
			    rq->rd->dl_bw.bw == 0) {
				retval = -EPERM;
				goto unlock;
			}
		}
#endif
	}

	/* Re-check policy now with rq lock held: */
	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
		policy = oldpolicy = -1;
		task_rq_unlock(rq, p, &rf);
		if (cpuset_locked)
			cpuset_unlock();
		goto recheck;
	}

	/*
	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
	 * is available.
	 */
	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
		retval = -EBUSY;
		goto unlock;
	}

	p->sched_reset_on_fork = reset_on_fork;
	oldprio = p->prio;

	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
	if (pi) {
		/*
		 * Take priority boosted tasks into account. If the new
		 * effective priority is unchanged, we just store the new
		 * normal parameters and do not touch the scheduler class and
		 * the runqueue. This will be done when the task deboosts
		 * itself.
		 */
		newprio = rt_effective_prio(p, newprio);
		if (newprio == oldprio)
			queue_flags &= ~DEQUEUE_MOVE;
	}

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);
	if (queued)
		dequeue_task(rq, p, queue_flags);
	if (running)
		put_prev_task(rq, p);

	prev_class = p->sched_class;

	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
		__setscheduler_params(p, attr);
		__setscheduler_prio(p, newprio);
	}
	__setscheduler_uclamp(p, attr);
	check_class_changing(rq, p, prev_class);

	if (queued) {
		/*
		 * We enqueue to tail when the priority of a task is
		 * increased (user space view).
		 */
		if (oldprio < p->prio)
			queue_flags |= ENQUEUE_HEAD;

		enqueue_task(rq, p, queue_flags);
	}
	if (running)
		set_next_task(rq, p);

	check_class_changed(rq, p, prev_class, oldprio);

	/* Prevent the rq from going away on us: */
	preempt_disable();
	head = splice_balance_callbacks(rq);
	task_rq_unlock(rq, p, &rf);

	if (pi) {
		if (cpuset_locked)
			cpuset_unlock();
		rt_mutex_adjust_pi(p);
	}

	/* Run balance callbacks after we've adjusted the PI chain: */
	balance_callbacks(rq, head);
	preempt_enable();

	return 0;

unlock:
	task_rq_unlock(rq, p, &rf);
	if (cpuset_locked)
		cpuset_unlock();
	return retval;
}

static int _sched_setscheduler(struct task_struct *p, int policy,
			       const struct sched_param *param, bool check)
{
	struct sched_attr attr = {
		.sched_policy   = policy,
		.sched_priority = param->sched_priority,
		.sched_nice	= PRIO_TO_NICE(p->static_prio),
	};

	if (p->se.custom_slice)
		attr.sched_runtime = p->se.slice;

	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
		policy &= ~SCHED_RESET_ON_FORK;
		attr.sched_policy = policy;
	}

	return __sched_setscheduler(p, &attr, check, true);
}
/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Use sched_set_fifo(), read its comment.
 *
 * Return: 0 on success. An error code otherwise.
 *
 * NOTE that the task may already be dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
		       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, true);
}

int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, true, true);
}

int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, false, true);
}
EXPORT_SYMBOL_GPL(sched_setattr_nocheck);

/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernel-space.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission. For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 *
 * Return: 0 on success. An error code otherwise.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
			       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, false);
}
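
/*
 * Example (illustrative sketch): a kernel user giving a worker thread a
 * SCHED_DEADLINE reservation of 5ms every 10ms, bypassing the
 * user-permission checks via sched_setattr_nocheck():
 *
 *	struct sched_attr attr = {
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  5 * NSEC_PER_MSEC,
 *		.sched_deadline	= 10 * NSEC_PER_MSEC,
 *		.sched_period	= 10 * NSEC_PER_MSEC,
 *	};
 *	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
 *
 * Note that deadline bandwidth admission control still applies.
 */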

/*
 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
 * incapable of resource management, which is the one thing an OS really should
 * be doing.
 *
 * This is of course the reason it is limited to privileged users only.
 *
 * Worse still, it is fundamentally impossible to compose static priority
 * workloads. You cannot take two correctly working static prio workloads
 * and smash them together and still expect them to work.
 *
 * For this reason 'all' FIFO tasks the kernel creates are basically at:
 *
 *   MAX_RT_PRIO / 2
 *
 * The administrator _MUST_ configure the system, the kernel simply doesn't
 * know enough information to make a sensible choice.
 */
void sched_set_fifo(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo);

/*
 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
 */
void sched_set_fifo_low(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = 1 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo_low);

void sched_set_normal(struct task_struct *p, int nice)
{
	struct sched_attr attr = {
		.sched_policy = SCHED_NORMAL,
		.sched_nice = nice,
	};
	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_normal);

static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
	struct sched_param lparam;

	if (!param || pid < 0)
		return -EINVAL;
	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
		return -EFAULT;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	return sched_setscheduler(p, policy, &lparam);
}

/*
 * Mimics kernel/events/core.c perf_copy_attr().
 */
static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
{
	u32 size;
	int ret;

	/* Zero the full structure, so that a short copy will be nice: */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	/* ABI compatibility quirk: */
	if (!size)
		size = SCHED_ATTR_SIZE_VER0;
	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
		goto err_size;

	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
	if (ret) {
		if (ret == -E2BIG)
			goto err_size;
		return ret;
	}

	if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
	    size < SCHED_ATTR_SIZE_VER1)
		return -EINVAL;

	/*
	 * XXX: Do we want to be lenient like existing syscalls; or do we want
	 * to be strict and return an error on out-of-bounds values?
	 */
	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);

	return 0;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	return -E2BIG;
}

static void get_params(struct task_struct *p, struct sched_attr *attr)
{
	if (task_has_dl_policy(p)) {
		__getparam_dl(p, attr);
	} else if (task_has_rt_policy(p)) {
		attr->sched_priority = p->rt_priority;
	} else {
		attr->sched_nice = task_nice(p);
		attr->sched_runtime = p->se.slice;
	}
}
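
/*
 * Example (illustrative): an old binary compiled against the VER0
 * layout passes size == SCHED_ATTR_SIZE_VER0 (48 bytes); the
 * copy_struct_from_user() above zero-fills the newer fields (such as
 * the utilization clamps), so the kernel sees them as unset rather
 * than as garbage.
 */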

/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
{
	if (policy < 0)
		return -EINVAL;

	return do_sched_setscheduler(pid, policy, param);
}

/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
}

/**
 * sys_sched_setattr - same as above, but with extended sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @flags: for future extension.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
			       unsigned int, flags)
{
	struct sched_attr attr;
	int retval;

	if (!uattr || pid < 0 || flags)
		return -EINVAL;

	retval = sched_copy_attr(uattr, &attr);
	if (retval)
		return retval;

	if ((int)attr.sched_policy < 0)
		return -EINVAL;
	if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
		attr.sched_policy = SETPARAM_POLICY;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
		get_params(p, &attr);

	return sched_setattr(p, &attr);
}

/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 *
 * Return: On success, the policy of the thread. Otherwise, a negative error
 * code.
 */
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
	struct task_struct *p;
	int retval;

	if (pid < 0)
		return -EINVAL;

	guard(rcu)();
	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	retval = security_task_getscheduler(p);
	if (!retval) {
		retval = p->policy;
		if (p->sched_reset_on_fork)
			retval |= SCHED_RESET_ON_FORK;
	}
	return retval;
}

/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 *
 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
 * code.
 */
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
	struct sched_param lp = { .sched_priority = 0 };
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;

	scoped_guard (rcu) {
		p = find_process_by_pid(pid);
		if (!p)
			return -ESRCH;

		retval = security_task_getscheduler(p);
		if (retval)
			return retval;

		if (task_has_rt_policy(p))
			lp.sched_priority = p->rt_priority;
	}

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
}
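
/*
 * Example (illustrative): user-space decoding the raw
 * sched_getscheduler() return value, which may carry the
 * SCHED_RESET_ON_FORK bit in addition to the policy:
 *
 *	int ret = syscall(SYS_sched_getscheduler, pid);
 *	int policy = ret & ~SCHED_RESET_ON_FORK;
 *	bool reset_on_fork = ret & SCHED_RESET_ON_FORK;
 */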

/*
 * Copy the kernel-side attribute structure (which might be larger
 * than what user-space knows about) to user-space.
 *
 * Note that all cases are valid: user-space buffer can be larger or
 * smaller than the kernel-space buffer. The usual case is that both
 * have the same size.
 */
static int
sched_attr_copy_to_user(struct sched_attr __user *uattr,
			struct sched_attr *kattr,
			unsigned int usize)
{
	unsigned int ksize = sizeof(*kattr);

	if (!access_ok(uattr, usize))
		return -EFAULT;

	/*
	 * sched_getattr() ABI forwards and backwards compatibility:
	 *
	 * If usize == ksize then we just copy everything to user-space and all is good.
	 *
	 * If usize < ksize then we only copy as much as user-space has space for,
	 * this keeps ABI compatibility as well. We skip the rest.
	 *
	 * If usize > ksize then user-space is using a newer version of the ABI,
	 * parts of which the kernel doesn't know about. Just ignore it - tooling can
	 * detect the kernel's knowledge of attributes from the attr->size value
	 * which is set to ksize in this case.
	 */
	kattr->size = min(usize, ksize);

	if (copy_to_user(uattr, kattr, kattr->size))
		return -EFAULT;

	return 0;
}

/**
 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @usize: sizeof(attr) for fwd/bwd comp.
 * @flags: for future extension.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
		unsigned int, usize, unsigned int, flags)
{
	struct sched_attr kattr = { };
	struct task_struct *p;
	int retval;

	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
	    usize < SCHED_ATTR_SIZE_VER0 || flags)
		return -EINVAL;

	scoped_guard (rcu) {
		p = find_process_by_pid(pid);
		if (!p)
			return -ESRCH;

		retval = security_task_getscheduler(p);
		if (retval)
			return retval;

		kattr.sched_policy = p->policy;
		if (p->sched_reset_on_fork)
			kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
		get_params(p, &kattr);
		kattr.sched_flags &= SCHED_FLAG_ALL;

#ifdef CONFIG_UCLAMP_TASK
		/*
		 * This could race with another potential updater, but this is fine
		 * because it'll correctly read the old or the new value. We don't need
		 * to guarantee who wins the race as long as it doesn't return garbage.
		 */
		kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
		kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
#endif
	}

	return sched_attr_copy_to_user(uattr, &kattr, usize);
}
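
/*
 * Example (illustrative): user-space reading a task's attributes back,
 * passing its own idea of the structure size for the compatibility
 * handling above; pid 0 means the calling thread:
 *
 *	struct sched_attr attr;
 *
 *	if (syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0) == 0)
 *		printf("policy=%u nice=%d\n",
 *		       attr.sched_policy, attr.sched_nice);
 */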

#ifdef CONFIG_SMP
int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
{
	/*
	 * If the task isn't a deadline task or admission control is
	 * disabled then we don't care about affinity changes.
	 */
	if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
		return 0;

	/*
	 * Since bandwidth control happens on root_domain basis,
	 * if admission test is enabled, we only admit -deadline
	 * tasks allowed to run on all the CPUs in the task's
	 * root_domain.
	 */
	guard(rcu)();
	if (!cpumask_subset(task_rq(p)->rd->span, mask))
		return -EBUSY;

	return 0;
}
#endif /* CONFIG_SMP */

int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
{
	int retval;
	cpumask_var_t cpus_allowed, new_mask;

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
		return -ENOMEM;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}

	cpuset_cpus_allowed(p, cpus_allowed);
	cpumask_and(new_mask, ctx->new_mask, cpus_allowed);

	ctx->new_mask = new_mask;
	ctx->flags |= SCA_CHECK;

	retval = dl_task_check_affinity(p, new_mask);
	if (retval)
		goto out_free_new_mask;

	retval = __set_cpus_allowed_ptr(p, ctx);
	if (retval)
		goto out_free_new_mask;

	cpuset_cpus_allowed(p, cpus_allowed);
	if (!cpumask_subset(new_mask, cpus_allowed)) {
		/*
		 * We must have raced with a concurrent cpuset update.
		 * Just reset the cpumask to the cpuset's cpus_allowed.
		 */
		cpumask_copy(new_mask, cpus_allowed);

		/*
		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
		 * will restore the previous user_cpus_ptr value.
		 *
		 * In the unlikely event a previous user_cpus_ptr exists,
		 * we need to further restrict the mask to what is allowed
		 * by that old user_cpus_ptr.
		 */
		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
			bool empty = !cpumask_and(new_mask, new_mask,
						  ctx->user_mask);

			if (WARN_ON_ONCE(empty))
				cpumask_copy(new_mask, cpus_allowed);
		}
		__set_cpus_allowed_ptr(p, ctx);
		retval = -EINVAL;
	}

out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
	return retval;
}

long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	struct affinity_context ac;
	struct cpumask *user_mask;
	int retval;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	if (p->flags & PF_NO_SETAFFINITY)
		return -EINVAL;

	if (!check_same_owner(p)) {
		guard(rcu)();
		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
			return -EPERM;
	}

	retval = security_task_setscheduler(p);
	if (retval)
		return retval;

	/*
	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
	 * alloc_user_cpus_ptr() returns NULL.
	 */
	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
	if (user_mask) {
		cpumask_copy(user_mask, in_mask);
	} else if (IS_ENABLED(CONFIG_SMP)) {
		return -ENOMEM;
	}

	ac = (struct affinity_context){
		.new_mask  = in_mask,
		.user_mask = user_mask,
		.flags     = SCA_USER,
	};

	retval = __sched_setaffinity(p, &ac);
	kfree(ac.user_mask);

	return retval;
}
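
/*
 * Example (illustrative sketch): pinning the current task to CPU 0 from
 * kernel code goes through the same path as the syscall:
 *
 *	int ret = sched_setaffinity(0, cpumask_of(0));
 *
 * where pid 0 means the current task, as in find_process_by_pid().
 */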

static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
			     struct cpumask *new_mask)
{
	if (len < cpumask_size())
		cpumask_clear(new_mask);
	else if (len > cpumask_size())
		len = cpumask_size();

	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}

/**
 * sys_sched_setaffinity - set the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new CPU mask
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval == 0)
		retval = sched_setaffinity(pid, new_mask);
	free_cpumask_var(new_mask);
	return retval;
}

long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
	struct task_struct *p;
	int retval;

	guard(rcu)();
	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	retval = security_task_getscheduler(p);
	if (retval)
		return retval;

	guard(raw_spinlock_irqsave)(&p->pi_lock);
	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);

	return 0;
}

/**
 * sys_sched_getaffinity - get the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current CPU mask
 *
 * Return: size of CPU mask copied to user_mask_ptr on success. An
 * error code otherwise.
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(unsigned long)-1))
		return -EINVAL;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		unsigned int retlen = min(len, cpumask_size());

		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}

static void do_sched_yield(void)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = this_rq_lock_irq(&rf);

	schedstat_inc(rq->yld_count);
	current->sched_class->yield_task(rq);

	preempt_disable();
	rq_unlock_irq(rq, &rf);
	sched_preempt_enable_no_resched();

	schedule();
}

/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 *
 * Return: 0.
 */
SYSCALL_DEFINE0(sched_yield)
{
	do_sched_yield();
	return 0;
}

/**
 * yield - yield the current processor to other threads.
 *
 * Do not ever use this function, there's a 99% chance you're doing it wrong.
 *
 * The scheduler is at all times free to pick the calling task as the most
 * eligible task to run, if removing the yield() call from your code breaks
 * it, it's already broken.
 *
 * Typical broken usage is:
 *
 * while (!event)
 *	yield();
 *
 * where one assumes that yield() will let 'the other' process run that will
 * make event true. If the current task is a SCHED_FIFO task that will never
 * happen. Never use yield() as a progress guarantee!!
 *
 * If you want to use yield() to wait for something, use wait_event().
 * If you want to use yield() to be 'nice' for others, use cond_resched().
 * If you still want to use yield(), do not!
 */
void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	do_sched_yield();
}
EXPORT_SYMBOL(yield);

/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 * @p: target task
 * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Return:
 *	true (>0) if we indeed boosted the target task.
 *	false (0) if we failed to boost the target.
 *	-ESRCH if there's no task to yield to.
 */
int __sched yield_to(struct task_struct *p, bool preempt)
{
	struct task_struct *curr = current;
	struct rq *rq, *p_rq;
	int yielded = 0;

	scoped_guard (irqsave) {
		rq = this_rq();

again:
		p_rq = task_rq(p);
		/*
		 * If we're the only runnable task on the rq and target rq also
		 * has only one task, there's absolutely no point in yielding.
		 */
		if (rq->nr_running == 1 && p_rq->nr_running == 1)
			return -ESRCH;

		guard(double_rq_lock)(rq, p_rq);
		if (task_rq(p) != p_rq)
			goto again;

		if (!curr->sched_class->yield_to_task)
			return 0;

		if (curr->sched_class != p->sched_class)
			return 0;

		if (task_on_cpu(p_rq, p) || !task_is_running(p))
			return 0;

		yielded = curr->sched_class->yield_to_task(rq, p);
		if (yielded) {
			schedstat_inc(rq->yld_count);
			/*
			 * Make p's CPU reschedule; pick_next_entity
			 * takes care of fairness.
			 */
			if (preempt && rq != p_rq)
				resched_curr(p_rq);
		}
	}

	if (yielded)
		schedule();

	return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);

/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the maximum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = MAX_RT_PRIO-1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
	case SCHED_EXT:
		ret = 0;
		break;
	}
	return ret;
}
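
/*
 * Example (illustrative): portable user-space code discovering the
 * valid SCHED_FIFO priority range instead of hard-coding it:
 *
 *	int lo = sched_get_priority_min(SCHED_FIFO);
 *	int hi = sched_get_priority_max(SCHED_FIFO);
 *
 * which yield 1 and 99 on Linux, per the switches above and below.
 */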

/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the minimum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
	case SCHED_EXT:
		ret = 0;
	}
	return ret;
}

static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
{
	unsigned int time_slice = 0;
	int retval;

	if (pid < 0)
		return -EINVAL;

	scoped_guard (rcu) {
		struct task_struct *p = find_process_by_pid(pid);
		if (!p)
			return -ESRCH;

		retval = security_task_getscheduler(p);
		if (retval)
			return retval;

		scoped_guard (task_rq_lock, p) {
			struct rq *rq = scope.rq;
			if (p->sched_class->get_rr_interval)
				time_slice = p->sched_class->get_rr_interval(rq, p);
		}
	}

	jiffies_to_timespec64(time_slice, t);
	return 0;
}

/**
 * sys_sched_rr_get_interval - return the default time-slice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the time-slice value.
 *
 * This syscall writes the default time-slice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 *
 * Return: On success, 0 and the time-slice is in @interval. Otherwise,
 * an error code.
 */
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
		struct __kernel_timespec __user *, interval)
{
	struct timespec64 t;
	int retval = sched_rr_get_interval(pid, &t);

	if (retval == 0)
		retval = put_timespec64(&t, interval);

	return retval;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
		struct old_timespec32 __user *, interval)
{
	struct timespec64 t;
	int retval = sched_rr_get_interval(pid, &t);

	if (retval == 0)
		retval = put_old_timespec32(&t, interval);
	return retval;
}
#endif