// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/syscalls.c
 *
 *  Core kernel scheduler syscalls related code
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
 */
#include <linux/sched.h>
#include <linux/cpuset.h>
#include <linux/sched/debug.h>

#include <uapi/linux/sched/types.h>

#include "sched.h"
#include "autogroup.h"

static inline int __normal_prio(int policy, int rt_prio, int nice)
{
        int prio;

        if (dl_policy(policy))
                prio = MAX_DL_PRIO - 1;
        else if (rt_policy(policy))
                prio = MAX_RT_PRIO - 1 - rt_prio;
        else
                prio = NICE_TO_PRIO(nice);

        return prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
        return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
}

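/*
 * For reference (a lower kernel prio value means a higher priority), e.g.:
 *
 *        __normal_prio(SCHED_DEADLINE,  0, 0) == -1     (MAX_DL_PRIO - 1)
 *        __normal_prio(SCHED_FIFO,     50, 0) == 49     (MAX_RT_PRIO - 1 - 50)
 *        __normal_prio(SCHED_NORMAL,    0, 0) == 120    (NICE_TO_PRIO(0))
 */
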
/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
        p->normal_prio = normal_prio(p);
        /*
         * If we are RT tasks or we were boosted to RT priority,
         * keep the priority unchanged. Otherwise, update priority
         * to the normal priority:
         */
        if (!rt_or_dl_prio(p->prio))
                return p->normal_prio;
        return p->prio;
}

void set_user_nice(struct task_struct *p, long nice)
{
        bool queued, running;
        struct rq *rq;
        int old_prio;

        if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
                return;
        /*
         * We have to be careful, if called from sys_setpriority(),
         * the task might be in the middle of scheduling on another CPU.
         */
        CLASS(task_rq_lock, rq_guard)(p);
        rq = rq_guard.rq;

        update_rq_clock(rq);

        /*
         * The RT priorities are set via sched_setscheduler(), but we still
         * allow the 'normal' nice value to be set - but as expected
         * it won't have any effect on scheduling as long as the task
         * has SCHED_DEADLINE, SCHED_FIFO or SCHED_RR policy:
         */
        if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
                p->static_prio = NICE_TO_PRIO(nice);
                return;
        }

        queued = task_on_rq_queued(p);
        running = task_current(rq, p);
        if (queued)
                dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
        if (running)
                put_prev_task(rq, p);

        p->static_prio = NICE_TO_PRIO(nice);
        set_load_weight(p, true);
        old_prio = p->prio;
        p->prio = effective_prio(p);

        if (queued)
                enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
        if (running)
                set_next_task(rq, p);

        /*
         * If the task increased its priority or is running and
         * lowered its priority, then reschedule its CPU:
         */
        p->sched_class->prio_changed(rq, p, old_prio);
}
EXPORT_SYMBOL(set_user_nice);

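/*
 * Example (sketch): an in-kernel caller lowering the weight of its own
 * worker thread:
 *
 *        set_user_nice(current, 19);    // weakest fair-class weight
 */
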
/*
 * is_nice_reduction - check if nice value is an actual reduction
 *
 * Similar to can_nice() but does not perform a capability check.
 *
 * @p: task
 * @nice: nice value
 */
static bool is_nice_reduction(const struct task_struct *p, const int nice)
{
        /* Convert nice value [19,-20] to rlimit style value [1,40]: */
        int nice_rlim = nice_to_rlimit(nice);

        return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
}

/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
int can_nice(const struct task_struct *p, const int nice)
{
        return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
}

#ifdef __ARCH_WANT_SYS_NICE

/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
        long nice, retval;

        /*
         * Setpriority might change our priority at the same moment.
         * We don't have to worry. Conceptually one call occurs first
         * and we have a single winner.
         */
        increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
        nice = task_nice(current) + increment;

        nice = clamp_val(nice, MIN_NICE, MAX_NICE);
        if (increment < 0 && !can_nice(current, nice))
                return -EPERM;

        retval = security_task_setnice(current, nice);
        if (retval)
                return retval;

        set_user_nice(current, nice);
        return 0;
}

#endif

/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * Return: The priority value as seen by users in /proc.
 *
 * sched policy         return value   kernel prio    user prio/nice
 *
 * normal, batch, idle     [0 ... 39]  [100 ... 139]          0/[-20 ... 19]
 * fifo, rr             [-2 ... -100]     [98 ... 0]  [1 ... 99]
 * deadline                     -101             -1           0
 */
int task_prio(const struct task_struct *p)
{
        return p->prio - MAX_RT_PRIO;
}

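/*
 * E.g.: a SCHED_NORMAL task at nice 0 has p->prio == 120, so task_prio()
 * returns 20; a SCHED_FIFO task at RT priority 99 has p->prio == 0 and
 * task_prio() returns -100.
 */
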
/**
 * idle_cpu - is a given CPU idle currently?
 * @cpu: the processor in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int idle_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        if (rq->curr != rq->idle)
                return 0;

        if (rq->nr_running)
                return 0;

#ifdef CONFIG_SMP
        if (rq->ttwu_pending)
                return 0;
#endif

        return 1;
}

/**
 * available_idle_cpu - is a given CPU idle for enqueuing work.
 * @cpu: the CPU in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int available_idle_cpu(int cpu)
{
        if (!idle_cpu(cpu))
                return 0;

        if (vcpu_is_preempted(cpu))
                return 0;

        return 1;
}

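/*
 * Note: idle_cpu() only inspects runqueue state; available_idle_cpu()
 * additionally filters out CPUs whose vCPU has been preempted by the
 * hypervisor, which makes it the more useful test for wake-up placement.
 */
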
/**
 * idle_task - return the idle task for a given CPU.
 * @cpu: the processor in question.
 *
 * Return: The idle task for the CPU @cpu.
 */
struct task_struct *idle_task(int cpu)
{
        return cpu_rq(cpu)->idle;
}

#ifdef CONFIG_SCHED_CORE
int sched_core_idle_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        if (sched_core_enabled(rq) && rq->curr == rq->idle)
                return 1;

        return idle_cpu(cpu);
}

#endif

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 *
 * Return: The task of @pid, if found. %NULL otherwise.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
        return pid ? find_task_by_vpid(pid) : current;
}

static struct task_struct *find_get_task(pid_t pid)
{
        struct task_struct *p;
        guard(rcu)();

        p = find_process_by_pid(pid);
        if (likely(p))
                get_task_struct(p);

        return p;
}

DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
             find_get_task(pid), pid_t pid)

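/*
 * A minimal usage sketch of the scope-based class defined above (see
 * do_sched_setscheduler() below for a real caller):
 *
 *        CLASS(find_get_task, p)(pid);
 *        if (!p)
 *                return -ESRCH;
 *        // ... use p; put_task_struct(p) runs automatically at scope exit
 */
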
/*
 * sched_setparam() passes in -1 for its policy, to let the functions
 * it calls know not to change it.
 */
#define SETPARAM_POLICY        -1

static void __setscheduler_params(struct task_struct *p,
                const struct sched_attr *attr)
{
        int policy = attr->sched_policy;

        if (policy == SETPARAM_POLICY)
                policy = p->policy;

        p->policy = policy;

        if (dl_policy(policy)) {
                __setparam_dl(p, attr);
        } else if (fair_policy(policy)) {
                p->static_prio = NICE_TO_PRIO(attr->sched_nice);
                if (attr->sched_runtime) {
                        p->se.custom_slice = 1;
                        p->se.slice = clamp_t(u64, attr->sched_runtime,
                                              NSEC_PER_MSEC/10,   /* HZ=1000 * 10 */
                                              NSEC_PER_MSEC*100); /* HZ=100  / 10 */
                } else {
                        p->se.custom_slice = 0;
                        p->se.slice = sysctl_sched_base_slice;
                }
        }

        /* rt-policy tasks do not have a timerslack */
        if (rt_or_dl_task_policy(p)) {
                p->timer_slack_ns = 0;
        } else if (p->timer_slack_ns == 0) {
                /* when switching back to non-rt policy, restore timerslack */
                p->timer_slack_ns = p->default_timer_slack_ns;
        }

        /*
         * __sched_setscheduler() ensures attr->sched_priority == 0 when
         * !rt_policy. Always setting this ensures that things like
         * getparam()/getattr() don't report silly values for !rt tasks.
         */
        p->rt_priority = attr->sched_priority;
        p->normal_prio = normal_prio(p);
        set_load_weight(p, true);
}

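/*
 * Example (sketch): a fair task can request a custom base slice via
 * sched_setattr() by passing a non-zero sched_runtime; the clamp above
 * bounds it to [100us, 100ms]:
 *
 *        attr.sched_policy  = SCHED_NORMAL;
 *        attr.sched_runtime = 1 * NSEC_PER_MSEC;    // request a 1ms slice
 */
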
/*
 * Check the target process has a UID that matches the current process's:
 */
static bool check_same_owner(struct task_struct *p)
{
        const struct cred *cred = current_cred(), *pcred;
        guard(rcu)();

        pcred = __task_cred(p);
        return (uid_eq(cred->euid, pcred->euid) ||
                uid_eq(cred->euid, pcred->uid));
}

#ifdef CONFIG_UCLAMP_TASK

static int uclamp_validate(struct task_struct *p,
                           const struct sched_attr *attr)
{
        int util_min = p->uclamp_req[UCLAMP_MIN].value;
        int util_max = p->uclamp_req[UCLAMP_MAX].value;

        if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
                util_min = attr->sched_util_min;

                if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
                        return -EINVAL;
        }

        if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
                util_max = attr->sched_util_max;

                if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
                        return -EINVAL;
        }

        if (util_min != -1 && util_max != -1 && util_min > util_max)
                return -EINVAL;

        /*
         * We have valid uclamp attributes; make sure uclamp is enabled.
         *
         * We need to do that here, because enabling static branches is a
         * blocking operation which obviously cannot be done while holding
         * scheduler locks.
         */
        static_branch_enable(&sched_uclamp_used);

        return 0;
}

static bool uclamp_reset(const struct sched_attr *attr,
                         enum uclamp_id clamp_id,
                         struct uclamp_se *uc_se)
{
        /* Reset on sched class change for a non user-defined clamp value. */
        if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
            !uc_se->user_defined)
                return true;

        /* Reset on sched_util_{min,max} == -1. */
        if (clamp_id == UCLAMP_MIN &&
            attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
            attr->sched_util_min == -1) {
                return true;
        }

        if (clamp_id == UCLAMP_MAX &&
            attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
            attr->sched_util_max == -1) {
                return true;
        }

        return false;
}

static void __setscheduler_uclamp(struct task_struct *p,
                                  const struct sched_attr *attr)
{
        enum uclamp_id clamp_id;

        for_each_clamp_id(clamp_id) {
                struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
                unsigned int value;

                if (!uclamp_reset(attr, clamp_id, uc_se))
                        continue;

                /*
                 * RT by default have a 100% boost value that could be modified
                 * at runtime.
                 */
                if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
                        value = sysctl_sched_uclamp_util_min_rt_default;
                else
                        value = uclamp_none(clamp_id);

                uclamp_se_set(uc_se, value, false);
        }

        if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
                return;

        if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
            attr->sched_util_min != -1) {
                uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
                              attr->sched_util_min, true);
        }

        if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
            attr->sched_util_max != -1) {
                uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
                              attr->sched_util_max, true);
        }
}

#else /* !CONFIG_UCLAMP_TASK: */

static inline int uclamp_validate(struct task_struct *p,
                                  const struct sched_attr *attr)
{
        return -EOPNOTSUPP;
}
static void __setscheduler_uclamp(struct task_struct *p,
                                  const struct sched_attr *attr) { }
#endif

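/*
 * Example (sketch): clamping a task's apparent utilization from user
 * space (assuming CONFIG_UCLAMP_TASK; SCHED_CAPACITY_SCALE is 1024):
 *
 *        attr.sched_flags    = SCHED_FLAG_KEEP_POLICY |
 *                              SCHED_FLAG_UTIL_CLAMP_MIN |
 *                              SCHED_FLAG_UTIL_CLAMP_MAX;
 *        attr.sched_util_min = 128;    // treat as at least ~12.5% busy
 *        attr.sched_util_max = 512;    // treat as at most  ~50%  busy
 *
 * A value of -1 resets a clamp to its class default.
 */
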
/*
 * Allow unprivileged RT tasks to decrease priority.
 * Only issue a capable test if needed and only once to avoid an audit
 * event on permitted non-privileged operations:
 */
static int user_check_sched_setscheduler(struct task_struct *p,
                                         const struct sched_attr *attr,
                                         int policy, int reset_on_fork)
{
        if (fair_policy(policy)) {
                if (attr->sched_nice < task_nice(p) &&
                    !is_nice_reduction(p, attr->sched_nice))
                        goto req_priv;
        }

        if (rt_policy(policy)) {
                unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);

                /* Can't set/change the rt policy: */
                if (policy != p->policy && !rlim_rtprio)
                        goto req_priv;

                /* Can't increase priority: */
                if (attr->sched_priority > p->rt_priority &&
                    attr->sched_priority > rlim_rtprio)
                        goto req_priv;
        }

        /*
         * Can't set/change SCHED_DEADLINE policy at all for now
         * (safest behavior); in the future we would like to allow
         * unprivileged DL tasks to increase their relative deadline
         * or reduce their runtime (both ways reducing utilization)
         */
        if (dl_policy(policy))
                goto req_priv;

        /*
         * Treat SCHED_IDLE as nice 20. Only allow a switch to
         * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
         */
        if (task_has_idle_policy(p) && !idle_policy(policy)) {
                if (!is_nice_reduction(p, task_nice(p)))
                        goto req_priv;
        }

        /* Can't change other user's priorities: */
        if (!check_same_owner(p))
                goto req_priv;

        /* Normal users shall not reset the sched_reset_on_fork flag: */
        if (p->sched_reset_on_fork && !reset_on_fork)
                goto req_priv;

        return 0;

req_priv:
        if (!capable(CAP_SYS_NICE))
                return -EPERM;

        return 0;
}

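/*
 * E.g.: with RLIMIT_RTPRIO set to 50, an unprivileged task may adopt
 * SCHED_FIFO/SCHED_RR and move within priorities 1..50 (or lower its
 * current priority), but it may not touch SCHED_DEADLINE and may only
 * affect tasks owned by the same user.
 */
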
int __sched_setscheduler(struct task_struct *p,
                         const struct sched_attr *attr,
                         bool user, bool pi)
{
        int oldpolicy = -1, policy = attr->sched_policy;
        int retval, oldprio, newprio, queued, running;
        const struct sched_class *prev_class, *next_class;
        struct balance_callback *head;
        struct rq_flags rf;
        int reset_on_fork;
        int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
        struct rq *rq;
        bool cpuset_locked = false;

        /* The pi code expects interrupts enabled */
        BUG_ON(pi && in_interrupt());
recheck:
        /* Double check policy once rq lock held: */
        if (policy < 0) {
                reset_on_fork = p->sched_reset_on_fork;
                policy = oldpolicy = p->policy;
        } else {
                reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);

                if (!valid_policy(policy))
                        return -EINVAL;
        }

        if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
                return -EINVAL;

        /*
         * Valid priorities for SCHED_FIFO and SCHED_RR are
         * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
         * SCHED_BATCH and SCHED_IDLE is 0.
         */
        if (attr->sched_priority > MAX_RT_PRIO-1)
                return -EINVAL;
        if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
            (rt_policy(policy) != (attr->sched_priority != 0)))
                return -EINVAL;

        if (user) {
                retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
                if (retval)
                        return retval;

                if (attr->sched_flags & SCHED_FLAG_SUGOV)
                        return -EINVAL;

                retval = security_task_setscheduler(p);
                if (retval)
                        return retval;
        }

        /* Update task specific "requested" clamps */
        if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
                retval = uclamp_validate(p, attr);
                if (retval)
                        return retval;
        }

        /*
         * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
         * information.
         */
        if (dl_policy(policy) || dl_policy(p->policy)) {
                cpuset_locked = true;
                cpuset_lock();
        }

        /*
         * Make sure no PI-waiters arrive (or leave) while we are
         * changing the priority of the task:
         *
         * To be able to change p->policy safely, the appropriate
         * runqueue lock must be held.
         */
        rq = task_rq_lock(p, &rf);
        update_rq_clock(rq);

        /*
         * Changing the policy of the stop thread is a very bad idea:
         */
        if (p == rq->stop) {
                retval = -EINVAL;
                goto unlock;
        }

        retval = scx_check_setscheduler(p, policy);
        if (retval)
                goto unlock;

        /*
         * If not changing anything there's no need to proceed further,
         * but store a possible modification of reset_on_fork.
         */
        if (unlikely(policy == p->policy)) {
                if (fair_policy(policy) &&
                    (attr->sched_nice != task_nice(p) ||
                     (attr->sched_runtime != p->se.slice)))
                        goto change;
                if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
                        goto change;
                if (dl_policy(policy) && dl_param_changed(p, attr))
                        goto change;
                if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
                        goto change;

                p->sched_reset_on_fork = reset_on_fork;
                retval = 0;
                goto unlock;
        }
change:

        if (user) {
#ifdef CONFIG_RT_GROUP_SCHED
                /*
                 * Do not allow real-time tasks into groups that have no runtime
                 * assigned.
                 */
                if (rt_bandwidth_enabled() && rt_policy(policy) &&
                    task_group(p)->rt_bandwidth.rt_runtime == 0 &&
                    !task_group_is_autogroup(task_group(p))) {
                        retval = -EPERM;
                        goto unlock;
                }
#endif
#ifdef CONFIG_SMP
                if (dl_bandwidth_enabled() && dl_policy(policy) &&
                    !(attr->sched_flags & SCHED_FLAG_SUGOV)) {
                        cpumask_t *span = rq->rd->span;

                        /*
                         * Don't allow tasks with an affinity mask smaller than
                         * the entire root_domain to become SCHED_DEADLINE. We
                         * will also fail if there's no bandwidth available.
                         */
                        if (!cpumask_subset(span, p->cpus_ptr) ||
                            rq->rd->dl_bw.bw == 0) {
                                retval = -EPERM;
                                goto unlock;
                        }
                }
#endif
        }

        /* Re-check policy now with rq lock held: */
        if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
                policy = oldpolicy = -1;
                task_rq_unlock(rq, p, &rf);
                if (cpuset_locked)
                        cpuset_unlock();
                goto recheck;
        }

        /*
         * If setscheduling to SCHED_DEADLINE (or changing the parameters
         * of a SCHED_DEADLINE task) we need to check if enough bandwidth
         * is available.
         */
        if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
                retval = -EBUSY;
                goto unlock;
        }

        p->sched_reset_on_fork = reset_on_fork;
        oldprio = p->prio;

        newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
        if (pi) {
                /*
                 * Take priority boosted tasks into account. If the new
                 * effective priority is unchanged, we just store the new
                 * normal parameters and do not touch the scheduler class and
                 * the runqueue. This will be done when the task deboosts
                 * itself.
                 */
                newprio = rt_effective_prio(p, newprio);
                if (newprio == oldprio)
                        queue_flags &= ~DEQUEUE_MOVE;
        }

        prev_class = p->sched_class;
        next_class = __setscheduler_class(p, newprio);

        if (prev_class != next_class && p->se.sched_delayed)
                dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);

        queued = task_on_rq_queued(p);
        running = task_current(rq, p);
        if (queued)
                dequeue_task(rq, p, queue_flags);
        if (running)
                put_prev_task(rq, p);

        if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
                __setscheduler_params(p, attr);
                p->sched_class = next_class;
                p->prio = newprio;
        }
        __setscheduler_uclamp(p, attr);
        check_class_changing(rq, p, prev_class);

        if (queued) {
                /*
                 * We enqueue to tail when the priority of a task is
                 * increased (user space view).
                 */
                if (oldprio < p->prio)
                        queue_flags |= ENQUEUE_HEAD;

                enqueue_task(rq, p, queue_flags);
        }
        if (running)
                set_next_task(rq, p);

        check_class_changed(rq, p, prev_class, oldprio);

        /* Avoid rq from going away on us: */
        preempt_disable();
        head = splice_balance_callbacks(rq);
        task_rq_unlock(rq, p, &rf);

        if (pi) {
                if (cpuset_locked)
                        cpuset_unlock();
                rt_mutex_adjust_pi(p);
        }

        /* Run balance callbacks after we've adjusted the PI chain: */
        balance_callbacks(rq, head);
        preempt_enable();

        return 0;

unlock:
        task_rq_unlock(rq, p, &rf);
        if (cpuset_locked)
                cpuset_unlock();
        return retval;
}

static int _sched_setscheduler(struct task_struct *p, int policy,
                               const struct sched_param *param, bool check)
{
        struct sched_attr attr = {
                .sched_policy   = policy,
                .sched_priority = param->sched_priority,
                .sched_nice     = PRIO_TO_NICE(p->static_prio),
        };

        if (p->se.custom_slice)
                attr.sched_runtime = p->se.slice;

        /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
        if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
                attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
                policy &= ~SCHED_RESET_ON_FORK;
                attr.sched_policy = policy;
        }

        return __sched_setscheduler(p, &attr, check, true);
}
/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Use sched_set_fifo(), read its comment.
 *
 * Return: 0 on success. An error code otherwise.
 *
 * NOTE that the task might already be dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
                       const struct sched_param *param)
{
        return _sched_setscheduler(p, policy, param, true);
}

int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
{
        return __sched_setscheduler(p, attr, true, true);
}

int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
{
        return __sched_setscheduler(p, attr, false, true);
}
EXPORT_SYMBOL_GPL(sched_setattr_nocheck);

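/*
 * In-tree example: the schedutil cpufreq governor uses
 * sched_setattr_nocheck() to make its per-policy kthread SCHED_DEADLINE
 * with SCHED_FLAG_SUGOV set, bypassing the user checks that would
 * otherwise reject that flag.
 */
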
/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernel-space.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission. For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 *
 * Return: 0 on success. An error code otherwise.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
                               const struct sched_param *param)
{
        return _sched_setscheduler(p, policy, param, false);
}

/*
 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
 * incapable of resource management, which is the one thing an OS really should
 * be doing.
 *
 * This is of course the reason it is limited to privileged users only.
 *
 * Worse still, it is fundamentally impossible to compose static priority
 * workloads. You cannot take two correctly working static prio workloads
 * and smash them together and still expect them to work.
 *
 * For this reason 'all' FIFO tasks the kernel creates are basically at:
 *
 *   MAX_RT_PRIO / 2
 *
 * The administrator _MUST_ configure the system, the kernel simply doesn't
 * know enough information to make a sensible choice.
 */
void sched_set_fifo(struct task_struct *p)
{
        struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
        WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo);

/*
 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
 */
void sched_set_fifo_low(struct task_struct *p)
{
        struct sched_param sp = { .sched_priority = 1 };
        WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo_low);

void sched_set_normal(struct task_struct *p, int nice)
{
        struct sched_attr attr = {
                .sched_policy = SCHED_NORMAL,
                .sched_nice = nice,
        };
        WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_normal);

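/*
 * Example (sketch): a driver kthread that must preempt all fair-class
 * work picks one of the helpers above instead of an ad-hoc priority:
 *
 *        sched_set_fifo(tsk);        // FIFO at MAX_RT_PRIO / 2
 *        sched_set_fifo_low(tsk);    // FIFO at priority 1
 *        sched_set_normal(tsk, 0);   // back to the fair class
 */
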
static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
        struct sched_param lparam;

        if (!param || pid < 0)
                return -EINVAL;
        if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
                return -EFAULT;

        CLASS(find_get_task, p)(pid);
        if (!p)
                return -ESRCH;

        return sched_setscheduler(p, policy, &lparam);
}

/*
 * Mimics kernel/events/core.c perf_copy_attr().
 */
static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
{
        u32 size;
        int ret;

        /* Zero the full structure, so that a short copy will be nice: */
        memset(attr, 0, sizeof(*attr));

        ret = get_user(size, &uattr->size);
        if (ret)
                return ret;

        /* ABI compatibility quirk: */
        if (!size)
                size = SCHED_ATTR_SIZE_VER0;
        if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
                goto err_size;

        ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
        if (ret) {
                if (ret == -E2BIG)
                        goto err_size;
                return ret;
        }

        if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
            size < SCHED_ATTR_SIZE_VER1)
                return -EINVAL;

        /*
         * XXX: Do we want to be lenient like existing syscalls; or do we want
         * to be strict and return an error on out-of-bounds values?
         */
        attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);

        return 0;

err_size:
        put_user(sizeof(*attr), &uattr->size);
        return -E2BIG;
}

static void get_params(struct task_struct *p, struct sched_attr *attr)
{
        if (task_has_dl_policy(p)) {
                __getparam_dl(p, attr);
        } else if (task_has_rt_policy(p)) {
                attr->sched_priority = p->rt_priority;
        } else {
                attr->sched_nice = task_nice(p);
                attr->sched_runtime = p->se.slice;
        }
}

/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
{
        if (policy < 0)
                return -EINVAL;

        return do_sched_setscheduler(pid, policy, param);
}

/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
        return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
}

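/*
 * E.g. (sketch), via the glibc wrappers:
 *
 *        struct sched_param sp = { .sched_priority = 10 };
 *
 *        sched_setscheduler(pid, SCHED_FIFO, &sp);    // set policy + prio
 *        sched_setparam(pid, &sp);                    // keep policy, set prio
 */
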
/**
 * sys_sched_setattr - same as above, but with extended sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @flags: for future extension.
 */
SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
                               unsigned int, flags)
{
        struct sched_attr attr;
        int retval;

        if (!uattr || pid < 0 || flags)
                return -EINVAL;

        retval = sched_copy_attr(uattr, &attr);
        if (retval)
                return retval;

        if ((int)attr.sched_policy < 0)
                return -EINVAL;
        if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
                attr.sched_policy = SETPARAM_POLICY;

        CLASS(find_get_task, p)(pid);
        if (!p)
                return -ESRCH;

        if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
                get_params(p, &attr);

        return sched_setattr(p, &attr);
}

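/*
 * Note: SCHED_FLAG_KEEP_POLICY turns the call into a parameter-only
 * update (SETPARAM_POLICY), while SCHED_FLAG_KEEP_PARAMS pre-loads the
 * task's current parameters via get_params() so that only the fields
 * the caller explicitly supplies (e.g. the uclamp values) change.
 */
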
/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 *
 * Return: On success, the policy of the thread. Otherwise, a negative error
 * code.
 */
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
        struct task_struct *p;
        int retval;

        if (pid < 0)
                return -EINVAL;

        guard(rcu)();
        p = find_process_by_pid(pid);
        if (!p)
                return -ESRCH;

        retval = security_task_getscheduler(p);
        if (!retval) {
                retval = p->policy;
                if (p->sched_reset_on_fork)
                        retval |= SCHED_RESET_ON_FORK;
        }
        return retval;
}

/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 *
 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
 * code.
 */
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
        struct sched_param lp = { .sched_priority = 0 };
        struct task_struct *p;
        int retval;

        if (!param || pid < 0)
                return -EINVAL;

        scoped_guard (rcu) {
                p = find_process_by_pid(pid);
                if (!p)
                        return -ESRCH;

                retval = security_task_getscheduler(p);
                if (retval)
                        return retval;

                if (task_has_rt_policy(p))
                        lp.sched_priority = p->rt_priority;
        }

        /*
         * This one might sleep, we cannot do it with a spinlock held ...
         */
        return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
}

/*
 * Copy the kernel-sized attribute structure (which might be larger
 * than what user-space knows about) to user-space.
 *
 * Note that all cases are valid: user-space buffer can be larger or
 * smaller than the kernel-space buffer. The usual case is that both
 * have the same size.
 */
static int
sched_attr_copy_to_user(struct sched_attr __user *uattr,
                        struct sched_attr *kattr,
                        unsigned int usize)
{
        unsigned int ksize = sizeof(*kattr);

        if (!access_ok(uattr, usize))
                return -EFAULT;

        /*
         * sched_getattr() ABI forwards and backwards compatibility:
         *
         * If usize == ksize then we just copy everything to user-space and all is good.
         *
         * If usize < ksize then we only copy as much as user-space has space for,
         * this keeps ABI compatibility as well. We skip the rest.
         *
         * If usize > ksize then user-space is using a newer version of the ABI,
         * with parts the kernel doesn't know about. Just ignore those - tooling can
         * detect the kernel's knowledge of attributes from the attr->size value
         * which is set to ksize in this case.
         */
        kattr->size = min(usize, ksize);

        if (copy_to_user(uattr, kattr, kattr->size))
                return -EFAULT;

        return 0;
}

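/*
 * E.g.: a binary compiled against SCHED_ATTR_SIZE_VER0 (48 bytes)
 * running on a newer kernel gets exactly its 48 bytes copied back;
 * a newer binary on an older kernel sees attr->size == ksize and can
 * tell which trailing fields the kernel left untouched.
 */
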
/**
 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @usize: sizeof(attr) as known to user-space, for forward/backward compatibility.
 * @flags: for future extension.
 */
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
                unsigned int, usize, unsigned int, flags)
{
        struct sched_attr kattr = { };
        struct task_struct *p;
        int retval;

        if (!uattr || pid < 0 || usize > PAGE_SIZE ||
            usize < SCHED_ATTR_SIZE_VER0 || flags)
                return -EINVAL;

        scoped_guard (rcu) {
                p = find_process_by_pid(pid);
                if (!p)
                        return -ESRCH;

                retval = security_task_getscheduler(p);
                if (retval)
                        return retval;

                kattr.sched_policy = p->policy;
                if (p->sched_reset_on_fork)
                        kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
                get_params(p, &kattr);
                kattr.sched_flags &= SCHED_FLAG_ALL;

#ifdef CONFIG_UCLAMP_TASK
                /*
                 * This could race with another potential updater, but this is fine
                 * because it'll correctly read the old or the new value. We don't need
                 * to guarantee who wins the race as long as it doesn't return garbage.
                 */
                kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
                kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
#endif
        }

        return sched_attr_copy_to_user(uattr, &kattr, usize);
}

#ifdef CONFIG_SMP
int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
{
        /*
         * If the task isn't a deadline task or admission control is
         * disabled then we don't care about affinity changes.
         */
        if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
                return 0;

        /*
         * Since bandwidth control happens on root_domain basis,
         * if admission test is enabled, we only admit -deadline
         * tasks allowed to run on all the CPUs in the task's
         * root_domain.
         */
        guard(rcu)();
        if (!cpumask_subset(task_rq(p)->rd->span, mask))
                return -EBUSY;

        return 0;
}
#endif /* CONFIG_SMP */

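/*
 * Concretely: with admission control enabled, a SCHED_DEADLINE task in
 * a root domain spanning CPUs 0-3 cannot be restricted to CPUs 0-1;
 * the affinity change fails with -EBUSY instead of silently breaking
 * the bandwidth accounting.
 */
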
int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
{
        int retval;
        cpumask_var_t cpus_allowed, new_mask;

        if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
                return -ENOMEM;

        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
                retval = -ENOMEM;
                goto out_free_cpus_allowed;
        }

        cpuset_cpus_allowed(p, cpus_allowed);
        cpumask_and(new_mask, ctx->new_mask, cpus_allowed);

        ctx->new_mask = new_mask;
        ctx->flags |= SCA_CHECK;

        retval = dl_task_check_affinity(p, new_mask);
        if (retval)
                goto out_free_new_mask;

        retval = __set_cpus_allowed_ptr(p, ctx);
        if (retval)
                goto out_free_new_mask;

        cpuset_cpus_allowed(p, cpus_allowed);
        if (!cpumask_subset(new_mask, cpus_allowed)) {
                /*
                 * We must have raced with a concurrent cpuset update.
                 * Just reset the cpumask to the cpuset's cpus_allowed.
                 */
                cpumask_copy(new_mask, cpus_allowed);

                /*
                 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
                 * will restore the previous user_cpus_ptr value.
                 *
                 * In the unlikely event a previous user_cpus_ptr exists,
                 * we need to further restrict the mask to what is allowed
                 * by that old user_cpus_ptr.
                 */
                if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
                        bool empty = !cpumask_and(new_mask, new_mask,
                                                  ctx->user_mask);

                        if (WARN_ON_ONCE(empty))
                                cpumask_copy(new_mask, cpus_allowed);
                }
                __set_cpus_allowed_ptr(p, ctx);
                retval = -EINVAL;
        }

out_free_new_mask:
        free_cpumask_var(new_mask);
out_free_cpus_allowed:
        free_cpumask_var(cpus_allowed);
        return retval;
}


int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
{
	int retval;
	cpumask_var_t cpus_allowed, new_mask;

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
		return -ENOMEM;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}

	cpuset_cpus_allowed(p, cpus_allowed);
	cpumask_and(new_mask, ctx->new_mask, cpus_allowed);

	ctx->new_mask = new_mask;
	ctx->flags |= SCA_CHECK;

	retval = dl_task_check_affinity(p, new_mask);
	if (retval)
		goto out_free_new_mask;

	retval = __set_cpus_allowed_ptr(p, ctx);
	if (retval)
		goto out_free_new_mask;

	cpuset_cpus_allowed(p, cpus_allowed);
	if (!cpumask_subset(new_mask, cpus_allowed)) {
		/*
		 * We must have raced with a concurrent cpuset update.
		 * Just reset the cpumask to the cpuset's cpus_allowed.
		 */
		cpumask_copy(new_mask, cpus_allowed);

		/*
		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
		 * will restore the previous user_cpus_ptr value.
		 *
		 * In the unlikely event a previous user_cpus_ptr exists,
		 * we need to further restrict the mask to what is allowed
		 * by that old user_cpus_ptr.
		 */
		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
			bool empty = !cpumask_and(new_mask, new_mask,
						  ctx->user_mask);

			if (WARN_ON_ONCE(empty))
				cpumask_copy(new_mask, cpus_allowed);
		}
		__set_cpus_allowed_ptr(p, ctx);
		retval = -EINVAL;
	}

out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
	return retval;
}

long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	struct affinity_context ac;
	struct cpumask *user_mask;
	int retval;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	if (p->flags & PF_NO_SETAFFINITY)
		return -EINVAL;

	if (!check_same_owner(p)) {
		guard(rcu)();
		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
			return -EPERM;
	}

	retval = security_task_setscheduler(p);
	if (retval)
		return retval;

	/*
	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
	 * alloc_user_cpus_ptr() returns NULL.
	 */
	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
	if (user_mask) {
		cpumask_copy(user_mask, in_mask);
	} else if (IS_ENABLED(CONFIG_SMP)) {
		return -ENOMEM;
	}

	ac = (struct affinity_context){
		.new_mask  = in_mask,
		.user_mask = user_mask,
		.flags     = SCA_USER,
	};

	retval = __sched_setaffinity(p, &ac);
	kfree(ac.user_mask);

	return retval;
}
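
/*
 * Illustrative only: in-kernel callers normally do not reach
 * __sched_setaffinity() directly; a kernel thread pinning itself would
 * go through set_cpus_allowed_ptr(), as in this hedged sketch ('cpu' is
 * a hypothetical, already-validated CPU number):
 *
 *	if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
 *		pr_warn("could not pin to CPU %d\n", cpu);
 */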

static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
			     struct cpumask *new_mask)
{
	if (len < cpumask_size())
		cpumask_clear(new_mask);
	else if (len > cpumask_size())
		len = cpumask_size();

	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}

/**
 * sys_sched_setaffinity - set the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new CPU mask
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval == 0)
		retval = sched_setaffinity(pid, new_mask);
	free_cpumask_var(new_mask);
	return retval;
}
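
/*
 * Illustrative only: a hedged userspace sketch using the standard glibc
 * wrapper for this syscall, pinning the calling thread to CPU 0. Names
 * follow sched_setaffinity(2); nothing here is taken from the kernel
 * sources.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		cpu_set_t set;
 *
 *		CPU_ZERO(&set);
 *		CPU_SET(0, &set);
 *		// pid 0 means the calling thread.
 *		if (sched_setaffinity(0, sizeof(set), &set) == -1) {
 *			perror("sched_setaffinity");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */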

long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
	struct task_struct *p;
	int retval;

	guard(rcu)();
	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	retval = security_task_getscheduler(p);
	if (retval)
		return retval;

	guard(raw_spinlock_irqsave)(&p->pi_lock);
	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);

	return 0;
}

/**
 * sys_sched_getaffinity - get the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current CPU mask
 *
 * Return: size of CPU mask copied to user_mask_ptr on success. An
 * error code otherwise.
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(unsigned long)-1))
		return -EINVAL;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		unsigned int retlen = min(len, cpumask_size());

		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}

static void do_sched_yield(void)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = this_rq_lock_irq(&rf);

	schedstat_inc(rq->yld_count);
	current->sched_class->yield_task(rq);

	preempt_disable();
	rq_unlock_irq(rq, &rf);
	sched_preempt_enable_no_resched();

	schedule();
}

/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 *
 * Return: 0.
 */
SYSCALL_DEFINE0(sched_yield)
{
	do_sched_yield();
	return 0;
}
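
/*
 * Illustrative only: a hedged userspace sketch of reading back the
 * affinity mask. Note that the raw syscall returns the number of bytes
 * copied (retlen above), while the glibc sched_getaffinity() wrapper
 * hides this and returns 0 on success.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		cpu_set_t set;
 *		int cpu;
 *
 *		if (sched_getaffinity(0, sizeof(set), &set) == -1)
 *			return 1;
 *		for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
 *			if (CPU_ISSET(cpu, &set))
 *				printf("allowed: CPU %d\n", cpu);
 *		return 0;
 *	}
 */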

/**
 * yield - yield the current processor to other threads.
 *
 * Do not ever use this function, there's a 99% chance you're doing it wrong.
 *
 * The scheduler is at all times free to pick the calling task as the most
 * eligible task to run; if removing the yield() call from your code breaks
 * it, it's already broken.
 *
 * Typical broken usage is:
 *
 * while (!event)
 *	yield();
 *
 * where one assumes that yield() will let 'the other' process run and make
 * event true. If the current task is a SCHED_FIFO task that will never
 * happen. Never use yield() as a progress guarantee!
 *
 * If you want to use yield() to wait for something, use wait_event().
 * If you want to use yield() to be 'nice' for others, use cond_resched().
 * If you still want to use yield(), do not!
 */
void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	do_sched_yield();
}
EXPORT_SYMBOL(yield);

/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 * @p: target task
 * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Return:
 *	true (>0) if we indeed boosted the target task.
 *	false (0) if we failed to boost the target.
 *	-ESRCH if there's no task to yield to.
 */
int __sched yield_to(struct task_struct *p, bool preempt)
{
	struct task_struct *curr = current;
	struct rq *rq, *p_rq;
	int yielded = 0;

	scoped_guard (irqsave) {
		rq = this_rq();

again:
		p_rq = task_rq(p);
		/*
		 * If we're the only runnable task on the rq and target rq also
		 * has only one task, there's absolutely no point in yielding.
		 */
		if (rq->nr_running == 1 && p_rq->nr_running == 1)
			return -ESRCH;

		guard(double_rq_lock)(rq, p_rq);
		if (task_rq(p) != p_rq)
			goto again;

		if (!curr->sched_class->yield_to_task)
			return 0;

		if (curr->sched_class != p->sched_class)
			return 0;

		if (task_on_cpu(p_rq, p) || !task_is_running(p))
			return 0;

		yielded = curr->sched_class->yield_to_task(rq, p);
		if (yielded) {
			schedstat_inc(rq->yld_count);
			/*
			 * Make p's CPU reschedule; pick_next_entity
			 * takes care of fairness.
			 */
			if (preempt && rq != p_rq)
				resched_curr(p_rq);
		}
	}

	if (yielded)
		schedule();

	return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);
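
/*
 * Illustrative only: a hedged sketch of the kind of directed yield this
 * export serves (KVM's lock-holder preemption mitigation is a well-known
 * caller). 'boost_lock_holder' and 'target' are hypothetical names; the
 * get/put_task_struct() pair covers the liveness requirement stated in
 * the kernel-doc above.
 *
 *	static void boost_lock_holder(struct task_struct *target)
 *	{
 *		// Keep the target alive across the call, per the
 *		// yield_to() contract.
 *		get_task_struct(target);
 *		if (yield_to(target, true) > 0)
 *			pr_debug("boosted pid %d\n", target->pid);
 *		put_task_struct(target);
 *	}
 */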

/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the maximum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = MAX_RT_PRIO-1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
	case SCHED_EXT:
		ret = 0;
		break;
	}
	return ret;
}

/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the minimum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
	case SCHED_EXT:
		ret = 0;
	}
	return ret;
}
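
/*
 * Illustrative only: a hedged userspace sketch querying the valid
 * static-priority range for a policy before calling
 * sched_setscheduler(); both wrappers below are the standard POSIX
 * interfaces to the two syscalls above.
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int lo = sched_get_priority_min(SCHED_FIFO);
 *		int hi = sched_get_priority_max(SCHED_FIFO);
 *
 *		if (lo == -1 || hi == -1)
 *			return 1;
 *		printf("SCHED_FIFO priorities: %d..%d\n", lo, hi);
 *		return 0;
 *	}
 */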

static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
{
	unsigned int time_slice = 0;
	int retval;

	if (pid < 0)
		return -EINVAL;

	scoped_guard (rcu) {
		struct task_struct *p = find_process_by_pid(pid);
		if (!p)
			return -ESRCH;

		retval = security_task_getscheduler(p);
		if (retval)
			return retval;

		scoped_guard (task_rq_lock, p) {
			struct rq *rq = scope.rq;
			if (p->sched_class->get_rr_interval)
				time_slice = p->sched_class->get_rr_interval(rq, p);
		}
	}

	jiffies_to_timespec64(time_slice, t);
	return 0;
}

/**
 * sys_sched_rr_get_interval - return the default time-slice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the time-slice value.
 *
 * This syscall writes the default time-slice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 *
 * Return: On success, 0 and the time-slice is in @interval. Otherwise,
 * an error code.
 */
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
		struct __kernel_timespec __user *, interval)
{
	struct timespec64 t;
	int retval = sched_rr_get_interval(pid, &t);

	if (retval == 0)
		retval = put_timespec64(&t, interval);

	return retval;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
		struct old_timespec32 __user *, interval)
{
	struct timespec64 t;
	int retval = sched_rr_get_interval(pid, &t);

	if (retval == 0)
		retval = put_old_timespec32(&t, interval);
	return retval;
}
#endif
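
/*
 * Illustrative only: a hedged userspace sketch using the POSIX wrapper
 * for this syscall, reading the round-robin quantum of the calling
 * process. A reported value of 0 means 'infinity', per the kernel-doc
 * above.
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct timespec ts;
 *
 *		// pid 0 queries the calling process.
 *		if (sched_rr_get_interval(0, &ts))
 *			return 1;
 *		printf("time-slice: %ld.%09ld s\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 *		return 0;
 *	}
 */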