// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/syscalls.c
 *
 * Core kernel scheduler syscalls related code
 *
 * Copyright (C) 1991-2002 Linus Torvalds
 * Copyright (C) 1998-2024 Ingo Molnar, Red Hat
 */
#include <linux/sched.h>
#include <linux/cpuset.h>
#include <linux/sched/debug.h>

#include <uapi/linux/sched/types.h>

#include "sched.h"
#include "autogroup.h"

static inline int __normal_prio(int policy, int rt_prio, int nice)
{
	int prio;

	if (dl_policy(policy))
		prio = MAX_DL_PRIO - 1;
	else if (rt_policy(policy))
		prio = MAX_RT_PRIO - 1 - rt_prio;
	else
		prio = NICE_TO_PRIO(nice);

	return prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_or_dl_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

void set_user_nice(struct task_struct *p, long nice)
{
	bool queued, running;
	struct rq *rq;
	int old_prio;

	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
		return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
	CLASS(task_rq_lock, rq_guard)(p);
	rq = rq_guard.rq;

	update_rq_clock(rq);

	/*
	 * The RT priorities are set via sched_setscheduler(), but we still
	 * allow the 'normal' nice value to be set - but as expected
	 * it won't have any effect on scheduling as long as the task is
	 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
	 */
	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
		p->static_prio = NICE_TO_PRIO(nice);
		return;
	}

	queued = task_on_rq_queued(p);
	running = task_current_donor(rq, p);
	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	if (running)
		put_prev_task(rq, p);

	p->static_prio = NICE_TO_PRIO(nice);
	set_load_weight(p, true);
	old_prio = p->prio;
	p->prio = effective_prio(p);

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);

	/*
	 * If the task increased its priority or is running and
	 * lowered its priority, then reschedule its CPU:
	 */
	p->sched_class->prio_changed(rq, p, old_prio);
}
EXPORT_SYMBOL(set_user_nice);

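/*
 * Example: a minimal sketch of the typical in-kernel use of set_user_nice(),
 * a kthread demoting itself to the lowest priority before doing background
 * work. The worker and do_background_work() are hypothetical names used only
 * for illustration.
 *
 *	static int my_background_worker(void *unused)
 *	{
 *		set_user_nice(current, MAX_NICE);
 *		while (!kthread_should_stop())
 *			do_background_work();
 *		return 0;
 *	}
 */
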
/*
 * is_nice_reduction - check if nice value is an actual reduction
 *
 * Similar to can_nice() but does not perform a capability check.
 *
 * @p: task
 * @nice: nice value
 */
static bool is_nice_reduction(const struct task_struct *p, const int nice)
{
	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
	int nice_rlim = nice_to_rlimit(nice);

	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
}

/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
int can_nice(const struct task_struct *p, const int nice)
{
	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
}

#ifdef __ARCH_WANT_SYS_NICE

/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
	long nice, retval;

	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
	nice = task_nice(current) + increment;

	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
	if (increment < 0 && !can_nice(current, nice))
		return -EPERM;

	retval = security_task_setnice(current, nice);
	if (retval)
		return retval;

	set_user_nice(current, nice);
	return 0;
}

#endif

/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * Return: The priority value as seen by users in /proc.
 *
 * sched policy         return value   kernel prio    user prio/nice
 *
 * normal, batch, idle     [0 ... 39]  [100 ... 139]          0/[-20 ... 19]
 * fifo, rr             [-2 ... -100]     [98 ... 0]  [1 ... 99]
 * deadline                     -101             -1           0
 */
int task_prio(const struct task_struct *p)
{
	return p->prio - MAX_RT_PRIO;
}

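/*
 * Worked example of the table above (MAX_RT_PRIO is 100, DEFAULT_PRIO 120):
 * a SCHED_NORMAL task at nice 5 has a kernel prio of NICE_TO_PRIO(5) == 125,
 * so task_prio() reports 125 - 100 = 25; a SCHED_FIFO task at rt_priority 50
 * has a kernel prio of 99 - 50 == 49 and task_prio() reports 49 - 100 = -51.
 */
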
/**
 * idle_cpu - is a given CPU idle currently?
 * @cpu: the processor in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (rq->curr != rq->idle)
		return 0;

	if (rq->nr_running)
		return 0;

#ifdef CONFIG_SMP
	if (rq->ttwu_pending)
		return 0;
#endif

	return 1;
}

/**
 * available_idle_cpu - is a given CPU idle for enqueuing work.
 * @cpu: the CPU in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int available_idle_cpu(int cpu)
{
	if (!idle_cpu(cpu))
		return 0;

	if (vcpu_is_preempted(cpu))
		return 0;

	return 1;
}

/**
 * idle_task - return the idle task for a given CPU.
 * @cpu: the processor in question.
 *
 * Return: The idle task for the CPU @cpu.
 */
struct task_struct *idle_task(int cpu)
{
	return cpu_rq(cpu)->idle;
}

#ifdef CONFIG_SCHED_CORE
int sched_core_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (sched_core_enabled(rq) && rq->curr == rq->idle)
		return 1;

	return idle_cpu(cpu);
}

#endif

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 *
 * The task of @pid, if found. %NULL otherwise.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}

static struct task_struct *find_get_task(pid_t pid)
{
	struct task_struct *p;
	guard(rcu)();

	p = find_process_by_pid(pid);
	if (likely(p))
		get_task_struct(p);

	return p;
}

DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
	     find_get_task(pid), pid_t pid)

/*
 * sched_setparam() passes in -1 for its policy, to let the functions
 * it calls know not to change it.
 */
#define SETPARAM_POLICY	-1

static void __setscheduler_params(struct task_struct *p,
				  const struct sched_attr *attr)
{
	int policy = attr->sched_policy;

	if (policy == SETPARAM_POLICY)
		policy = p->policy;

	p->policy = policy;

	if (dl_policy(policy))
		__setparam_dl(p, attr);
	else if (fair_policy(policy))
		__setparam_fair(p, attr);

	/* rt-policy tasks do not have a timerslack */
	if (rt_or_dl_task_policy(p)) {
		p->timer_slack_ns = 0;
	} else if (p->timer_slack_ns == 0) {
		/* when switching back to non-rt policy, restore timerslack */
		p->timer_slack_ns = p->default_timer_slack_ns;
	}

	/*
	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
	 * !rt_policy. Always setting this ensures that things like
	 * getparam()/getattr() don't report silly values for !rt tasks.
	 */
	p->rt_priority = attr->sched_priority;
	p->normal_prio = normal_prio(p);
	set_load_weight(p, true);
}

/*
 * Check the target process has a UID that matches the current process's:
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	guard(rcu)();

	pcred = __task_cred(p);
	return (uid_eq(cred->euid, pcred->euid) ||
		uid_eq(cred->euid, pcred->uid));
}

#ifdef CONFIG_UCLAMP_TASK

static int uclamp_validate(struct task_struct *p,
			   const struct sched_attr *attr)
{
	int util_min = p->uclamp_req[UCLAMP_MIN].value;
	int util_max = p->uclamp_req[UCLAMP_MAX].value;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
		util_min = attr->sched_util_min;

		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
		util_max = attr->sched_util_max;

		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (util_min != -1 && util_max != -1 && util_min > util_max)
		return -EINVAL;

	/*
	 * We have valid uclamp attributes; make sure uclamp is enabled.
	 *
	 * We need to do that here, because enabling static branches is a
	 * blocking operation which obviously cannot be done while holding
	 * scheduler locks.
	 */
	static_branch_enable(&sched_uclamp_used);

	return 0;
}

static bool uclamp_reset(const struct sched_attr *attr,
			 enum uclamp_id clamp_id,
			 struct uclamp_se *uc_se)
{
	/* Reset on sched class change for a non user-defined clamp value. */
	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
	    !uc_se->user_defined)
		return true;

	/* Reset on sched_util_{min,max} == -1. */
	if (clamp_id == UCLAMP_MIN &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min == -1) {
		return true;
	}

	if (clamp_id == UCLAMP_MAX &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max == -1) {
		return true;
	}

	return false;
}

static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr)
{
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id) {
		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
		unsigned int value;

		if (!uclamp_reset(attr, clamp_id, uc_se))
			continue;

		/*
		 * RT by default have a 100% boost value that could be modified
		 * at runtime.
		 */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			value = sysctl_sched_uclamp_util_min_rt_default;
		else
			value = uclamp_none(clamp_id);

		uclamp_se_set(uc_se, value, false);

	}

	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
		return;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
			      attr->sched_util_min, true);
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
			      attr->sched_util_max, true);
	}
}

#else /* !CONFIG_UCLAMP_TASK: */

static inline int uclamp_validate(struct task_struct *p,
				  const struct sched_attr *attr)
{
	return -EOPNOTSUPP;
}
static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr) { }
#endif

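/*
 * Example: a minimal userspace sketch of requesting the utilization clamps
 * handled above via sched_setattr(2). There is no glibc wrapper, so the raw
 * syscall is used; the values assume the default SCHED_CAPACITY_SCALE of 1024
 * and error handling is omitted.
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_NORMAL,
 *		.sched_flags	= SCHED_FLAG_UTIL_CLAMP_MIN |
 *				  SCHED_FLAG_UTIL_CLAMP_MAX,
 *		.sched_util_min	= 128,	// boost to at least ~12.5% capacity
 *		.sched_util_max	= 512,	// cap at ~50% capacity
 *	};
 *
 *	syscall(SYS_sched_setattr, 0, &attr, 0);
 */
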
/*
 * Allow unprivileged RT tasks to decrease priority.
 * Only issue a capable test if needed and only once to avoid an audit
 * event on permitted non-privileged operations:
 */
static int user_check_sched_setscheduler(struct task_struct *p,
					 const struct sched_attr *attr,
					 int policy, int reset_on_fork)
{
	if (fair_policy(policy)) {
		if (attr->sched_nice < task_nice(p) &&
		    !is_nice_reduction(p, attr->sched_nice))
			goto req_priv;
	}

	if (rt_policy(policy)) {
		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);

		/* Can't set/change the rt policy: */
		if (policy != p->policy && !rlim_rtprio)
			goto req_priv;

		/* Can't increase priority: */
		if (attr->sched_priority > p->rt_priority &&
		    attr->sched_priority > rlim_rtprio)
			goto req_priv;
	}

	/*
	 * Can't set/change SCHED_DEADLINE policy at all for now
	 * (safest behavior); in the future we would like to allow
	 * unprivileged DL tasks to increase their relative deadline
	 * or reduce their runtime (both ways reducing utilization)
	 */
	if (dl_policy(policy))
		goto req_priv;

	/*
	 * Treat SCHED_IDLE as nice 20. Only allow a switch to
	 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
	 */
	if (task_has_idle_policy(p) && !idle_policy(policy)) {
		if (!is_nice_reduction(p, task_nice(p)))
			goto req_priv;
	}

	/* Can't change other user's priorities: */
	if (!check_same_owner(p))
		goto req_priv;

	/* Normal users shall not reset the sched_reset_on_fork flag: */
	if (p->sched_reset_on_fork && !reset_on_fork)
		goto req_priv;

	return 0;

req_priv:
	if (!capable(CAP_SYS_NICE))
		return -EPERM;

	return 0;
}

int __sched_setscheduler(struct task_struct *p,
			 const struct sched_attr *attr,
			 bool user, bool pi)
{
	int oldpolicy = -1, policy = attr->sched_policy;
	int retval, oldprio, newprio, queued, running;
	const struct sched_class *prev_class, *next_class;
	struct balance_callback *head;
	struct rq_flags rf;
	int reset_on_fork;
	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
	struct rq *rq;
	bool cpuset_locked = false;

	/* The pi code expects interrupts enabled */
	BUG_ON(pi && in_interrupt());
recheck:
	/* Double check policy once rq lock held: */
	if (policy < 0) {
		reset_on_fork = p->sched_reset_on_fork;
		policy = oldpolicy = p->policy;
	} else {
		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);

		if (!valid_policy(policy))
			return -EINVAL;
	}

	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
		return -EINVAL;

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
	 * SCHED_BATCH and SCHED_IDLE is 0.
	 */
	if (attr->sched_priority > MAX_RT_PRIO-1)
		return -EINVAL;
	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
	    (rt_policy(policy) != (attr->sched_priority != 0)))
		return -EINVAL;

	if (user) {
		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
		if (retval)
			return retval;

		if (attr->sched_flags & SCHED_FLAG_SUGOV)
			return -EINVAL;

		retval = security_task_setscheduler(p);
		if (retval)
			return retval;
	}

	/* Update task specific "requested" clamps */
	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
		retval = uclamp_validate(p, attr);
		if (retval)
			return retval;
	}

	/*
	 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
	 * information.
	 */
	if (dl_policy(policy) || dl_policy(p->policy)) {
		cpuset_locked = true;
		cpuset_lock();
	}

	/*
	 * Make sure no PI-waiters arrive (or leave) while we are
	 * changing the priority of the task:
	 *
	 * To be able to change p->policy safely, the appropriate
	 * runqueue lock must be held.
	 */
	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);

	/*
	 * Changing the policy of the stop threads is a very bad idea:
	 */
	if (p == rq->stop) {
		retval = -EINVAL;
		goto unlock;
	}

	retval = scx_check_setscheduler(p, policy);
	if (retval)
		goto unlock;

	/*
	 * If not changing anything there's no need to proceed further,
	 * but store a possible modification of reset_on_fork.
	 */
	if (unlikely(policy == p->policy)) {
		if (fair_policy(policy) &&
		    (attr->sched_nice != task_nice(p) ||
		     (attr->sched_runtime != p->se.slice)))
			goto change;
		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
			goto change;
		if (dl_policy(policy) && dl_param_changed(p, attr))
			goto change;
		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
			goto change;

		p->sched_reset_on_fork = reset_on_fork;
		retval = 0;
		goto unlock;
	}
change:

	if (user) {
#ifdef CONFIG_RT_GROUP_SCHED
		/*
		 * Do not allow real-time tasks into groups that have no runtime
		 * assigned.
		 */
		if (rt_bandwidth_enabled() && rt_policy(policy) &&
				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
				!task_group_is_autogroup(task_group(p))) {
			retval = -EPERM;
			goto unlock;
		}
#endif
#ifdef CONFIG_SMP
		if (dl_bandwidth_enabled() && dl_policy(policy) &&
				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
			cpumask_t *span = rq->rd->span;

			/*
			 * Don't allow tasks with an affinity mask smaller than
			 * the entire root_domain to become SCHED_DEADLINE. We
			 * will also fail if there's no bandwidth available.
			 */
			if (!cpumask_subset(span, p->cpus_ptr) ||
			    rq->rd->dl_bw.bw == 0) {
				retval = -EPERM;
				goto unlock;
			}
		}
#endif
	}

	/* Re-check policy now with rq lock held: */
	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
		policy = oldpolicy = -1;
		task_rq_unlock(rq, p, &rf);
		if (cpuset_locked)
			cpuset_unlock();
		goto recheck;
	}

	/*
	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
	 * is available.
	 */
	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
		retval = -EBUSY;
		goto unlock;
	}

	p->sched_reset_on_fork = reset_on_fork;
	oldprio = p->prio;

	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
	if (pi) {
		/*
		 * Take priority boosted tasks into account. If the new
		 * effective priority is unchanged, we just store the new
		 * normal parameters and do not touch the scheduler class and
		 * the runqueue. This will be done when the task deboosts
		 * itself.
		 */
		newprio = rt_effective_prio(p, newprio);
		if (newprio == oldprio)
			queue_flags &= ~DEQUEUE_MOVE;
	}

	prev_class = p->sched_class;
	next_class = __setscheduler_class(policy, newprio);

	if (prev_class != next_class && p->se.sched_delayed)
		dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);

	queued = task_on_rq_queued(p);
	running = task_current_donor(rq, p);
	if (queued)
		dequeue_task(rq, p, queue_flags);
	if (running)
		put_prev_task(rq, p);

	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
		__setscheduler_params(p, attr);
		p->sched_class = next_class;
		p->prio = newprio;
	}
	__setscheduler_uclamp(p, attr);
	check_class_changing(rq, p, prev_class);

	if (queued) {
		/*
		 * We enqueue to tail when the priority of a task is
		 * increased (user space view).
		 */
		if (oldprio < p->prio)
			queue_flags |= ENQUEUE_HEAD;

		enqueue_task(rq, p, queue_flags);
	}
	if (running)
		set_next_task(rq, p);

	check_class_changed(rq, p, prev_class, oldprio);

	/* Avoid rq from going away on us: */
	preempt_disable();
	head = splice_balance_callbacks(rq);
	task_rq_unlock(rq, p, &rf);

	if (pi) {
		if (cpuset_locked)
			cpuset_unlock();
		rt_mutex_adjust_pi(p);
	}

	/* Run balance callbacks after we've adjusted the PI chain: */
	balance_callbacks(rq, head);
	preempt_enable();

	return 0;

unlock:
	task_rq_unlock(rq, p, &rf);
	if (cpuset_locked)
		cpuset_unlock();
	return retval;
}

static int _sched_setscheduler(struct task_struct *p, int policy,
			       const struct sched_param *param, bool check)
{
	struct sched_attr attr = {
		.sched_policy   = policy,
		.sched_priority = param->sched_priority,
		.sched_nice     = PRIO_TO_NICE(p->static_prio),
	};

	if (p->se.custom_slice)
		attr.sched_runtime = p->se.slice;

	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
		policy &= ~SCHED_RESET_ON_FORK;
		attr.sched_policy = policy;
	}

	return __sched_setscheduler(p, &attr, check, true);
}
/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Use sched_set_fifo(), read its comment.
 *
 * Return: 0 on success. An error code otherwise.
 *
 * NOTE that the task may be already dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
		       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, true);
}

int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, true, true);
}

int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, false, true);
}
EXPORT_SYMBOL_GPL(sched_setattr_nocheck);

/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernel-space.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission. For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 *
 * Return: 0 on success. An error code otherwise.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
			       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, false);
}

/*
 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
 * incapable of resource management, which is the one thing an OS really should
 * be doing.
 *
 * This is of course the reason it is limited to privileged users only.
 *
 * Worse still; it is fundamentally impossible to compose static priority
 * workloads. You cannot take two correctly working static prio workloads
 * and smash them together and still expect them to work.
 *
 * For this reason 'all' FIFO tasks the kernel creates are basically at:
 *
 *   MAX_RT_PRIO / 2
 *
 * The administrator _MUST_ configure the system, the kernel simply doesn't
 * know enough information to make a sensible choice.
 */
void sched_set_fifo(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo);

/*
 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
 */
void sched_set_fifo_low(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = 1 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo_low);

void sched_set_normal(struct task_struct *p, int nice)
{
	struct sched_attr attr = {
		.sched_policy = SCHED_NORMAL,
		.sched_nice = nice,
	};
	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_normal);

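/*
 * Example: the sched_set_fifo*() helpers above are the intended API for
 * drivers that need an RT kthread; they avoid hand-picked priority numbers.
 * A minimal sketch with a hypothetical thread function my_irq_thread_fn():
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_run(my_irq_thread_fn, NULL, "my-irq-thread");
 *	if (!IS_ERR(tsk))
 *		sched_set_fifo(tsk);
 */
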
static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
	struct sched_param lparam;

	if (!param || pid < 0)
		return -EINVAL;
	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
		return -EFAULT;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	return sched_setscheduler(p, policy, &lparam);
}

/*
 * Mimics kernel/events/core.c perf_copy_attr().
 */
static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
{
	u32 size;
	int ret;

	/* Zero the full structure, so that a short copy will be nice: */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	/* ABI compatibility quirk: */
	if (!size)
		size = SCHED_ATTR_SIZE_VER0;
	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
		goto err_size;

	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
	if (ret) {
		if (ret == -E2BIG)
			goto err_size;
		return ret;
	}

	if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
	    size < SCHED_ATTR_SIZE_VER1)
		return -EINVAL;

	/*
	 * XXX: Do we want to be lenient like existing syscalls; or do we want
	 * to be strict and return an error on out-of-bounds values?
	 */
	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);

	return 0;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	return -E2BIG;
}

static void get_params(struct task_struct *p, struct sched_attr *attr)
{
	if (task_has_dl_policy(p)) {
		__getparam_dl(p, attr);
	} else if (task_has_rt_policy(p)) {
		attr->sched_priority = p->rt_priority;
	} else {
		attr->sched_nice = task_nice(p);
		attr->sched_runtime = p->se.slice;
	}
}

/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
{
	if (policy < 0)
		return -EINVAL;

	return do_sched_setscheduler(pid, policy, param);
}

/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
}

/**
 * sys_sched_setattr - same as above, but with extended sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @flags: for future extension.
 */
SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
			       unsigned int, flags)
{
	struct sched_attr attr;
	int retval;

	if (!uattr || pid < 0 || flags)
		return -EINVAL;

	retval = sched_copy_attr(uattr, &attr);
	if (retval)
		return retval;

	if ((int)attr.sched_policy < 0)
		return -EINVAL;
	if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
		attr.sched_policy = SETPARAM_POLICY;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
		get_params(p, &attr);

	return sched_setattr(p, &attr);
}

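/*
 * Example: a minimal userspace sketch of sched_setattr(2) switching the
 * calling thread to SCHED_DEADLINE. Times are in nanoseconds, the call
 * requires CAP_SYS_NICE and is subject to the bandwidth and affinity checks
 * in __sched_setscheduler().
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	//  10 ms budget
 *		.sched_deadline	=  30 * 1000 * 1000,	//  30 ms relative deadline
 *		.sched_period	= 100 * 1000 * 1000,	// 100 ms period
 *	};
 *
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 */
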
/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 *
 * Return: On success, the policy of the thread. Otherwise, a negative error
 * code.
 */
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
	struct task_struct *p;
	int retval;

	if (pid < 0)
		return -EINVAL;

	guard(rcu)();
	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	retval = security_task_getscheduler(p);
	if (!retval) {
		retval = p->policy;
		if (p->sched_reset_on_fork)
			retval |= SCHED_RESET_ON_FORK;
	}
	return retval;
}

/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 *
 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
 * code.
 */
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
	struct sched_param lp = { .sched_priority = 0 };
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;

	scoped_guard (rcu) {
		p = find_process_by_pid(pid);
		if (!p)
			return -ESRCH;

		retval = security_task_getscheduler(p);
		if (retval)
			return retval;

		if (task_has_rt_policy(p))
			lp.sched_priority = p->rt_priority;
	}

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
}

/**
 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @usize: sizeof(attr) for fwd/bwd comp.
 * @flags: for future extension.
 */
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
		unsigned int, usize, unsigned int, flags)
{
	struct sched_attr kattr = { };
	struct task_struct *p;
	int retval;

	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
	    usize < SCHED_ATTR_SIZE_VER0 || flags)
		return -EINVAL;

	scoped_guard (rcu) {
		p = find_process_by_pid(pid);
		if (!p)
			return -ESRCH;

		retval = security_task_getscheduler(p);
		if (retval)
			return retval;

		kattr.sched_policy = p->policy;
		if (p->sched_reset_on_fork)
			kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
		get_params(p, &kattr);
		kattr.sched_flags &= SCHED_FLAG_ALL;

#ifdef CONFIG_UCLAMP_TASK
		/*
		 * This could race with another potential updater, but this is fine
		 * because it'll correctly read the old or the new value. We don't need
		 * to guarantee who wins the race as long as it doesn't return garbage.
		 */
		kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
		kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
#endif
	}

	kattr.size = min(usize, sizeof(kattr));
	return copy_struct_to_user(uattr, usize, &kattr, sizeof(kattr), NULL);
}

#ifdef CONFIG_SMP
int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
{
	/*
	 * If the task isn't a deadline task or admission control is
	 * disabled then we don't care about affinity changes.
	 */
	if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
		return 0;

	/*
	 * Since bandwidth control happens on root_domain basis,
	 * if admission test is enabled, we only admit -deadline
	 * tasks allowed to run on all the CPUs in the task's
	 * root_domain.
	 */
	guard(rcu)();
	if (!cpumask_subset(task_rq(p)->rd->span, mask))
		return -EBUSY;

	return 0;
}
#endif /* CONFIG_SMP */

int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
{
	int retval;
	cpumask_var_t cpus_allowed, new_mask;

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
		return -ENOMEM;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}

	cpuset_cpus_allowed(p, cpus_allowed);
	cpumask_and(new_mask, ctx->new_mask, cpus_allowed);

	ctx->new_mask = new_mask;
	ctx->flags |= SCA_CHECK;

	retval = dl_task_check_affinity(p, new_mask);
	if (retval)
		goto out_free_new_mask;

	retval = __set_cpus_allowed_ptr(p, ctx);
	if (retval)
		goto out_free_new_mask;

	cpuset_cpus_allowed(p, cpus_allowed);
	if (!cpumask_subset(new_mask, cpus_allowed)) {
		/*
		 * We must have raced with a concurrent cpuset update.
		 * Just reset the cpumask to the cpuset's cpus_allowed.
		 */
		cpumask_copy(new_mask, cpus_allowed);

		/*
		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
		 * will restore the previous user_cpus_ptr value.
		 *
		 * In the unlikely event a previous user_cpus_ptr exists,
		 * we need to further restrict the mask to what is allowed
		 * by that old user_cpus_ptr.
		 */
		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
			bool empty = !cpumask_and(new_mask, new_mask,
						  ctx->user_mask);

			if (empty)
				cpumask_copy(new_mask, cpus_allowed);
		}
		__set_cpus_allowed_ptr(p, ctx);
		retval = -EINVAL;
	}

out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
	return retval;
}

long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	struct affinity_context ac;
	struct cpumask *user_mask;
	int retval;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	if (p->flags & PF_NO_SETAFFINITY)
		return -EINVAL;

	if (!check_same_owner(p)) {
		guard(rcu)();
		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
			return -EPERM;
	}

	retval = security_task_setscheduler(p);
	if (retval)
		return retval;

	/*
	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
	 * alloc_user_cpus_ptr() returns NULL.
	 */
	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
	if (user_mask) {
		cpumask_copy(user_mask, in_mask);
	} else if (IS_ENABLED(CONFIG_SMP)) {
		return -ENOMEM;
	}

	ac = (struct affinity_context){
		.new_mask  = in_mask,
		.user_mask = user_mask,
		.flags     = SCA_USER,
	};

	retval = __sched_setaffinity(p, &ac);
	kfree(ac.user_mask);

	return retval;
}

static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
			     struct cpumask *new_mask)
{
	if (len < cpumask_size())
		cpumask_clear(new_mask);
	else if (len > cpumask_size())
		len = cpumask_size();

	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}

/**
 * sys_sched_setaffinity - set the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new CPU mask
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval == 0)
		retval = sched_setaffinity(pid, new_mask);
	free_cpumask_var(new_mask);
	return retval;
}

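/*
 * Example: the usual userspace counterpart of the syscall above, using the
 * glibc wrapper and cpu_set_t helpers to pin the calling thread to CPU 2
 * (error handling kept minimal):
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(2, &set);
 *	if (sched_setaffinity(0, sizeof(set), &set))
 *		perror("sched_setaffinity");
 */
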
long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
	struct task_struct *p;
	int retval;

	guard(rcu)();
	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	retval = security_task_getscheduler(p);
	if (retval)
		return retval;

	guard(raw_spinlock_irqsave)(&p->pi_lock);
	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);

	return 0;
}

/**
 * sys_sched_getaffinity - get the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current CPU mask
 *
 * Return: size of CPU mask copied to user_mask_ptr on success. An
 * error code otherwise.
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(unsigned long)-1))
		return -EINVAL;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		unsigned int retlen = min(len, cpumask_size());

		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}

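/*
 * Note for callers of the raw syscall above: it returns the number of bytes
 * copied to @user_mask_ptr (not 0) and rejects a @len that is smaller than
 * needed for nr_cpu_ids or not a multiple of sizeof(unsigned long). A
 * minimal sketch; the glibc wrapper hides this and simply returns 0 on
 * success.
 *
 *	unsigned long bits[1024 / (8 * sizeof(unsigned long))];
 *	long ret = syscall(SYS_sched_getaffinity, 0, sizeof(bits), bits);
 *	// ret > 0: bytes of the kernel's cpumask actually copied
 */
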
static void do_sched_yield(void)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = this_rq_lock_irq(&rf);

	schedstat_inc(rq->yld_count);
	current->sched_class->yield_task(rq);

	preempt_disable();
	rq_unlock_irq(rq, &rf);
	sched_preempt_enable_no_resched();

	schedule();
}

/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 *
 * Return: 0.
 */
SYSCALL_DEFINE0(sched_yield)
{
	do_sched_yield();
	return 0;
}

/**
 * yield - yield the current processor to other threads.
 *
 * Do not ever use this function, there's a 99% chance you're doing it wrong.
 *
 * The scheduler is at all times free to pick the calling task as the most
 * eligible task to run, if removing the yield() call from your code breaks
 * it, it's already broken.
 *
 * Typical broken usage is:
 *
 * while (!event)
 *	yield();
 *
 * where one assumes that yield() will let 'the other' process run that will
 * make event true. If the current task is a SCHED_FIFO task that will never
 * happen. Never use yield() as a progress guarantee!!
 *
 * If you want to use yield() to wait for something, use wait_event().
 * If you want to use yield() to be 'nice' for others, use cond_resched().
 * If you still want to use yield(), do not!
 */
void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	do_sched_yield();
}
EXPORT_SYMBOL(yield);

/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 * @p: target task
 * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Return:
 *	true (>0) if we indeed boosted the target task.
 *	false (0) if we failed to boost the target.
 *	-ESRCH if there's no task to yield to.
 */
int __sched yield_to(struct task_struct *p, bool preempt)
{
	struct task_struct *curr = current;
	struct rq *rq, *p_rq;
	int yielded = 0;

	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
		rq = this_rq();

again:
		p_rq = task_rq(p);
		/*
		 * If we're the only runnable task on the rq and target rq also
		 * has only one task, there's absolutely no point in yielding.
		 */
		if (rq->nr_running == 1 && p_rq->nr_running == 1)
			return -ESRCH;

		guard(double_rq_lock)(rq, p_rq);
		if (task_rq(p) != p_rq)
			goto again;

		if (!curr->sched_class->yield_to_task)
			return 0;

		if (curr->sched_class != p->sched_class)
			return 0;

		if (task_on_cpu(p_rq, p) || !task_is_running(p))
			return 0;

		yielded = curr->sched_class->yield_to_task(rq, p);
		if (yielded) {
			schedstat_inc(rq->yld_count);
			/*
			 * Make p's CPU reschedule; pick_next_entity
			 * takes care of fairness.
			 */
			if (preempt && rq != p_rq)
				resched_curr(p_rq);
		}
	}

	if (yielded)
		schedule();

	return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);

/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the maximum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = MAX_RT_PRIO-1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
	case SCHED_EXT:
		ret = 0;
		break;
	}
	return ret;
}

/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the minimum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
	case SCHED_EXT:
		ret = 0;
	}
	return ret;
}

static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
{
	unsigned int time_slice = 0;
	int retval;

	if (pid < 0)
		return -EINVAL;

	scoped_guard (rcu) {
		struct task_struct *p = find_process_by_pid(pid);
		if (!p)
			return -ESRCH;

		retval = security_task_getscheduler(p);
		if (retval)
			return retval;

		scoped_guard (task_rq_lock, p) {
			struct rq *rq = scope.rq;
			if (p->sched_class->get_rr_interval)
				time_slice = p->sched_class->get_rr_interval(rq, p);
		}
	}

	jiffies_to_timespec64(time_slice, t);
	return 0;
}

/**
 * sys_sched_rr_get_interval - return the default time-slice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the time-slice value.
 *
 * this syscall writes the default time-slice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 *
 * Return: On success, 0 and the time-slice is in @interval. Otherwise,
 * an error code.
 */
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
		struct __kernel_timespec __user *, interval)
{
	struct timespec64 t;
	int retval = sched_rr_get_interval(pid, &t);

	if (retval == 0)
		retval = put_timespec64(&t, interval);

	return retval;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
		struct old_timespec32 __user *, interval)
{
	struct timespec64 t;
	int retval = sched_rr_get_interval(pid, &t);

	if (retval == 0)
		retval = put_old_timespec32(&t, interval);
	return retval;
}
#endif
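
/*
 * Example: the matching userspace call, reading the current thread's
 * round-robin timeslice. A zero timespec means "infinity" (e.g. SCHED_FIFO),
 * per the kernel-doc above.
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
 */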