// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/syscalls.c
 *
 *  Core kernel scheduler syscalls related code
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
 */
#include <linux/sched.h>
#include <linux/cpuset.h>
#include <linux/sched/debug.h>

#include <uapi/linux/sched/types.h>

#include "sched.h"
#include "autogroup.h"

static inline int __normal_prio(int policy, int rt_prio, int nice)
{
	int prio;

	if (dl_policy(policy))
		prio = MAX_DL_PRIO - 1;
	else if (rt_policy(policy))
		prio = MAX_RT_PRIO - 1 - rt_prio;
	else
		prio = NICE_TO_PRIO(nice);

	return prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}
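
/*
 * For reference, a few concrete values of the mapping above (assuming the
 * usual MAX_RT_PRIO == 100, MAX_DL_PRIO == 0 and NICE_TO_PRIO(n) == 120 + n):
 *
 *	SCHED_DEADLINE			-> kernel prio  -1
 *	SCHED_FIFO, rt_priority 99	-> kernel prio   0
 *	SCHED_FIFO, rt_priority  1	-> kernel prio  98
 *	SCHED_NORMAL, nice -20		-> kernel prio 100
 *	SCHED_NORMAL, nice   0		-> kernel prio 120
 *	SCHED_NORMAL, nice  19		-> kernel prio 139
 *
 * Lower kernel prio means "more important", across all classes.
 */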

void set_user_nice(struct task_struct *p, long nice)
{
	bool queued, running;
	struct rq *rq;
	int old_prio;

	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
		return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
	CLASS(task_rq_lock, rq_guard)(p);
	rq = rq_guard.rq;

	update_rq_clock(rq);

	/*
	 * The RT priorities are set via sched_setscheduler(), but we still
	 * allow the 'normal' nice value to be set - but as expected
	 * it won't have any effect on scheduling as long as the task is
	 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
	 */
	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
		p->static_prio = NICE_TO_PRIO(nice);
		return;
	}

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);
	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	if (running)
		put_prev_task(rq, p);

	p->static_prio = NICE_TO_PRIO(nice);
	set_load_weight(p, true);
	old_prio = p->prio;
	p->prio = effective_prio(p);

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);

	/*
	 * If the task increased its priority or is running and
	 * lowered its priority, then reschedule its CPU:
	 */
	p->sched_class->prio_changed(rq, p, old_prio);
}
EXPORT_SYMBOL(set_user_nice);

/*
 * is_nice_reduction - check if nice value is an actual reduction
 *
 * Similar to can_nice() but does not perform a capability check.
 *
 * @p: task
 * @nice: nice value
 */
static bool is_nice_reduction(const struct task_struct *p, const int nice)
{
	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
	int nice_rlim = nice_to_rlimit(nice);

	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
}

/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
int can_nice(const struct task_struct *p, const int nice)
{
	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
}
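
/*
 * Worked example of the rlimit-style conversion above, assuming the generic
 * nice_to_rlimit() mapping of [19..-20] onto [1..40] (i.e. 20 - nice):
 * with RLIMIT_NICE set to 30, nice_to_rlimit(-10) == 30 is still within the
 * limit, while nice_to_rlimit(-11) == 31 exceeds it and therefore needs
 * CAP_SYS_NICE via can_nice().
 */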

#ifdef __ARCH_WANT_SYS_NICE

/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
	long nice, retval;

	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
	nice = task_nice(current) + increment;

	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
	if (increment < 0 && !can_nice(current, nice))
		return -EPERM;

	retval = security_task_setnice(current, nice);
	if (retval)
		return retval;

	set_user_nice(current, nice);
	return 0;
}

#endif

/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * Return: The priority value as seen by users in /proc.
 *
 * sched policy         return value   kernel prio    user prio/nice
 *
 * normal, batch, idle     [0 ... 39]  [100 ... 139]          0/[-20 ... 19]
 * fifo, rr             [-2 ... -100]     [98 ... 0]  [1 ... 99]
 * deadline                     -101             -1           0
 */
int task_prio(const struct task_struct *p)
{
	return p->prio - MAX_RT_PRIO;
}
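
/*
 * Example (illustrative): a nice-0 SCHED_NORMAL task has p->prio == 120 and
 * task_prio() reports 20, while a SCHED_FIFO task with rt_priority 50 has
 * p->prio == 49 and task_prio() reports -51, matching the table above.
 */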

/**
 * idle_cpu - is a given CPU idle currently?
 * @cpu: the processor in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (rq->curr != rq->idle)
		return 0;

	if (rq->nr_running)
		return 0;

#ifdef CONFIG_SMP
	if (rq->ttwu_pending)
		return 0;
#endif

	return 1;
}

/**
 * available_idle_cpu - is a given CPU idle for enqueuing work.
 * @cpu: the CPU in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int available_idle_cpu(int cpu)
{
	if (!idle_cpu(cpu))
		return 0;

	if (vcpu_is_preempted(cpu))
		return 0;

	return 1;
}

/**
 * idle_task - return the idle task for a given CPU.
 * @cpu: the processor in question.
 *
 * Return: The idle task for the CPU @cpu.
 */
struct task_struct *idle_task(int cpu)
{
	return cpu_rq(cpu)->idle;
}

#ifdef CONFIG_SCHED_CORE
int sched_core_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (sched_core_enabled(rq) && rq->curr == rq->idle)
		return 1;

	return idle_cpu(cpu);
}

#endif

#ifdef CONFIG_SMP
/*
 * Load avg and utilization metrics need to be updated periodically and before
 * consumption. This function updates the metrics for all subsystems except for
 * the fair class. @rq must be locked and have its clock updated.
 */
bool update_other_load_avgs(struct rq *rq)
{
	u64 now = rq_clock_pelt(rq);
	const struct sched_class *curr_class = rq->curr->sched_class;
	unsigned long hw_pressure = arch_scale_hw_pressure(cpu_of(rq));

	lockdep_assert_rq_held(rq);

	return update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
		update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
		update_hw_load_avg(now, rq, hw_pressure) |
		update_irq_load_avg(rq, 0);
}

/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *   cpu_util_{cfs,rt,dl,irq}()
 *   cpu_bw_dl()
 *
 * Where the cfs,rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
 * which excludes things like IRQ and steal-time. These latter are then accrued
 * in the IRQ utilization.
 *
 * The DL bandwidth number OTOH is not a measured metric but a value computed
 * based on the task model parameters and gives the minimal utilization
 * required to meet deadlines.
 */
unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
				 unsigned long *min,
				 unsigned long *max)
{
	unsigned long util, irq, scale;
	struct rq *rq = cpu_rq(cpu);

	scale = arch_scale_cpu_capacity(cpu);

	/*
	 * Early check to see if IRQ/steal time saturates the CPU, can be
	 * because of inaccuracies in how we track these -- see
	 * update_irq_load_avg().
	 */
	irq = cpu_util_irq(rq);
	if (unlikely(irq >= scale)) {
		if (min)
			*min = scale;
		if (max)
			*max = scale;
		return scale;
	}

	if (min) {
		/*
		 * The minimum utilization returns the highest level between:
		 * - the computed DL bandwidth needed with the IRQ pressure which
		 *   steals time from the deadline task.
		 * - The minimum performance requirement for CFS and/or RT.
		 */
		*min = max(irq + cpu_bw_dl(rq), uclamp_rq_get(rq, UCLAMP_MIN));

		/*
		 * When an RT task is runnable and uclamp is not used, we must
		 * ensure that the task will run at maximum compute capacity.
		 */
		if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt))
			*min = max(*min, scale);
	}

	/*
	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
	 * CFS tasks and we use the same metric to track the effective
	 * utilization (PELT windows are synchronized) we can directly add them
	 * to obtain the CPU's actual utilization.
	 */
	util = util_cfs + cpu_util_rt(rq);
	util += cpu_util_dl(rq);

	/*
	 * The maximum hint is a soft bandwidth requirement, which can be lower
	 * than the actual utilization because of uclamp_max requirements.
	 */
	if (max)
		*max = min(scale, uclamp_rq_get(rq, UCLAMP_MAX));

	if (util >= scale)
		return scale;

	/*
	 * There is still idle time; further improve the number by using the
	 * IRQ metric. Because IRQ/steal time is hidden from the task clock we
	 * need to scale the task numbers:
	 *
	 *              max - irq
	 *   U' = irq + --------- * U
	 *                 max
	 */
	util = scale_irq_capacity(util, irq, scale);
	util += irq;

	return min(scale, util);
}

unsigned long sched_cpu_util(int cpu)
{
	return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL);
}
#endif /* CONFIG_SMP */
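
/*
 * Numeric example of the IRQ scaling above (illustrative): with scale == 1024,
 * irq == 128 and a summed CFS+RT+DL utilization of 512, only
 * (1024 - 128) / 1024 of the CPU was actually available to tasks, so
 * U' = 128 + 512 * 896 / 1024 = 576 out of 1024.
 */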

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 *
 * The task of @pid, if found. %NULL otherwise.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}

static struct task_struct *find_get_task(pid_t pid)
{
	struct task_struct *p;
	guard(rcu)();

	p = find_process_by_pid(pid);
	if (likely(p))
		get_task_struct(p);

	return p;
}

DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
	     find_get_task(pid), pid_t pid)
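
/*
 * The class above pairs find_get_task() with an automatic put_task_struct():
 * a variable declared via CLASS(find_get_task, p)(pid) drops its reference
 * when it goes out of scope, so the syscalls below can simply return without
 * an explicit put.
 */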

/*
 * sched_setparam() passes in -1 for its policy, to let the functions
 * it calls know not to change it.
 */
#define SETPARAM_POLICY	-1

static void __setscheduler_params(struct task_struct *p,
		const struct sched_attr *attr)
{
	int policy = attr->sched_policy;

	if (policy == SETPARAM_POLICY)
		policy = p->policy;

	p->policy = policy;

	if (dl_policy(policy))
		__setparam_dl(p, attr);
	else if (fair_policy(policy))
		p->static_prio = NICE_TO_PRIO(attr->sched_nice);

	/*
	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
	 * !rt_policy. Always setting this ensures that things like
	 * getparam()/getattr() don't report silly values for !rt tasks.
	 */
	p->rt_priority = attr->sched_priority;
	p->normal_prio = normal_prio(p);
	set_load_weight(p, true);
}

/*
 * Check the target process has a UID that matches the current process's:
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	guard(rcu)();

	pcred = __task_cred(p);
	return (uid_eq(cred->euid, pcred->euid) ||
		uid_eq(cred->euid, pcred->uid));
}

#ifdef CONFIG_UCLAMP_TASK

static int uclamp_validate(struct task_struct *p,
			   const struct sched_attr *attr)
{
	int util_min = p->uclamp_req[UCLAMP_MIN].value;
	int util_max = p->uclamp_req[UCLAMP_MAX].value;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
		util_min = attr->sched_util_min;

		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
		util_max = attr->sched_util_max;

		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (util_min != -1 && util_max != -1 && util_min > util_max)
		return -EINVAL;

	/*
	 * We have valid uclamp attributes; make sure uclamp is enabled.
	 *
	 * We need to do that here, because enabling static branches is a
	 * blocking operation which obviously cannot be done while holding
	 * scheduler locks.
	 */
	static_branch_enable(&sched_uclamp_used);

	return 0;
}

static bool uclamp_reset(const struct sched_attr *attr,
			 enum uclamp_id clamp_id,
			 struct uclamp_se *uc_se)
{
	/* Reset on sched class change for a non user-defined clamp value. */
	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
	    !uc_se->user_defined)
		return true;

	/* Reset on sched_util_{min,max} == -1. */
	if (clamp_id == UCLAMP_MIN &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min == -1) {
		return true;
	}

	if (clamp_id == UCLAMP_MAX &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max == -1) {
		return true;
	}

	return false;
}

static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr)
{
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id) {
		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
		unsigned int value;

		if (!uclamp_reset(attr, clamp_id, uc_se))
			continue;

		/*
		 * RT by default have a 100% boost value that could be modified
		 * at runtime.
		 */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			value = sysctl_sched_uclamp_util_min_rt_default;
		else
			value = uclamp_none(clamp_id);

		uclamp_se_set(uc_se, value, false);

	}

	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
		return;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
			      attr->sched_util_min, true);
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
			      attr->sched_util_max, true);
	}
}

#else /* !CONFIG_UCLAMP_TASK: */

static inline int uclamp_validate(struct task_struct *p,
				  const struct sched_attr *attr)
{
	return -EOPNOTSUPP;
}
static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr) { }
#endif

/*
 * Allow unprivileged RT tasks to decrease priority.
 * Only issue a capable test if needed and only once to avoid an audit
 * event on permitted non-privileged operations:
 */
static int user_check_sched_setscheduler(struct task_struct *p,
					 const struct sched_attr *attr,
					 int policy, int reset_on_fork)
{
	if (fair_policy(policy)) {
		if (attr->sched_nice < task_nice(p) &&
		    !is_nice_reduction(p, attr->sched_nice))
			goto req_priv;
	}

	if (rt_policy(policy)) {
		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);

		/* Can't set/change the rt policy: */
		if (policy != p->policy && !rlim_rtprio)
			goto req_priv;

		/* Can't increase priority: */
		if (attr->sched_priority > p->rt_priority &&
		    attr->sched_priority > rlim_rtprio)
			goto req_priv;
	}

	/*
	 * Can't set/change SCHED_DEADLINE policy at all for now
	 * (safest behavior); in the future we would like to allow
	 * unprivileged DL tasks to increase their relative deadline
	 * or reduce their runtime (both ways reducing utilization)
	 */
	if (dl_policy(policy))
		goto req_priv;

	/*
	 * Treat SCHED_IDLE as nice 20. Only allow a switch to
	 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
	 */
	if (task_has_idle_policy(p) && !idle_policy(policy)) {
		if (!is_nice_reduction(p, task_nice(p)))
			goto req_priv;
	}

	/* Can't change other user's priorities: */
	if (!check_same_owner(p))
		goto req_priv;

	/* Normal users shall not reset the sched_reset_on_fork flag: */
	if (p->sched_reset_on_fork && !reset_on_fork)
		goto req_priv;

	return 0;

req_priv:
	if (!capable(CAP_SYS_NICE))
		return -EPERM;

	return 0;
}

int __sched_setscheduler(struct task_struct *p,
			 const struct sched_attr *attr,
			 bool user, bool pi)
{
	int oldpolicy = -1, policy = attr->sched_policy;
	int retval, oldprio, newprio, queued, running;
	const struct sched_class *prev_class;
	struct balance_callback *head;
	struct rq_flags rf;
	int reset_on_fork;
	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
	struct rq *rq;
	bool cpuset_locked = false;

	/* The pi code expects interrupts enabled */
	BUG_ON(pi && in_interrupt());
recheck:
	/* Double check policy once rq lock held: */
	if (policy < 0) {
		reset_on_fork = p->sched_reset_on_fork;
		policy = oldpolicy = p->policy;
	} else {
		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);

		if (!valid_policy(policy))
			return -EINVAL;
	}

	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
		return -EINVAL;

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
	 * SCHED_BATCH and SCHED_IDLE is 0.
	 */
	if (attr->sched_priority > MAX_RT_PRIO-1)
		return -EINVAL;
	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
	    (rt_policy(policy) != (attr->sched_priority != 0)))
		return -EINVAL;

	if (user) {
		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
		if (retval)
			return retval;

		if (attr->sched_flags & SCHED_FLAG_SUGOV)
			return -EINVAL;

		retval = security_task_setscheduler(p);
		if (retval)
			return retval;
	}

	/* Update task specific "requested" clamps */
	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
		retval = uclamp_validate(p, attr);
		if (retval)
			return retval;
	}

	/*
	 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
	 * information.
	 */
	if (dl_policy(policy) || dl_policy(p->policy)) {
		cpuset_locked = true;
		cpuset_lock();
	}

	/*
	 * Make sure no PI-waiters arrive (or leave) while we are
	 * changing the priority of the task:
	 *
	 * To be able to change p->policy safely, the appropriate
	 * runqueue lock must be held.
	 */
	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);

	/*
	 * Changing the policy of the stop threads is a very bad idea:
	 */
	if (p == rq->stop) {
		retval = -EINVAL;
		goto unlock;
	}

	retval = scx_check_setscheduler(p, policy);
	if (retval)
		goto unlock;

	/*
	 * If not changing anything there's no need to proceed further,
	 * but store a possible modification of reset_on_fork.
	 */
	if (unlikely(policy == p->policy)) {
		if (fair_policy(policy) && attr->sched_nice != task_nice(p))
			goto change;
		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
			goto change;
		if (dl_policy(policy) && dl_param_changed(p, attr))
			goto change;
		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
			goto change;

		p->sched_reset_on_fork = reset_on_fork;
		retval = 0;
		goto unlock;
	}
change:

	if (user) {
#ifdef CONFIG_RT_GROUP_SCHED
		/*
		 * Do not allow real-time tasks into groups that have no runtime
		 * assigned.
		 */
		if (rt_bandwidth_enabled() && rt_policy(policy) &&
				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
				!task_group_is_autogroup(task_group(p))) {
			retval = -EPERM;
			goto unlock;
		}
#endif
#ifdef CONFIG_SMP
		if (dl_bandwidth_enabled() && dl_policy(policy) &&
				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
			cpumask_t *span = rq->rd->span;

			/*
			 * Don't allow tasks with an affinity mask smaller than
			 * the entire root_domain to become SCHED_DEADLINE. We
			 * will also fail if there's no bandwidth available.
			 */
			if (!cpumask_subset(span, p->cpus_ptr) ||
			    rq->rd->dl_bw.bw == 0) {
				retval = -EPERM;
				goto unlock;
			}
		}
#endif
	}

	/* Re-check policy now with rq lock held: */
	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
		policy = oldpolicy = -1;
		task_rq_unlock(rq, p, &rf);
		if (cpuset_locked)
			cpuset_unlock();
		goto recheck;
	}

	/*
	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
	 * is available.
	 */
	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
		retval = -EBUSY;
		goto unlock;
	}

	p->sched_reset_on_fork = reset_on_fork;
	oldprio = p->prio;

	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
	if (pi) {
		/*
		 * Take priority boosted tasks into account. If the new
		 * effective priority is unchanged, we just store the new
		 * normal parameters and do not touch the scheduler class and
		 * the runqueue. This will be done when the task deboosts
		 * itself.
		 */
		newprio = rt_effective_prio(p, newprio);
		if (newprio == oldprio)
			queue_flags &= ~DEQUEUE_MOVE;
	}

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);
	if (queued)
		dequeue_task(rq, p, queue_flags);
	if (running)
		put_prev_task(rq, p);

	prev_class = p->sched_class;

	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
		__setscheduler_params(p, attr);
		__setscheduler_prio(p, newprio);
	}
	__setscheduler_uclamp(p, attr);
	check_class_changing(rq, p, prev_class);

	if (queued) {
		/*
		 * We enqueue to tail when the priority of a task is
		 * increased (user space view).
		 */
		if (oldprio < p->prio)
			queue_flags |= ENQUEUE_HEAD;

		enqueue_task(rq, p, queue_flags);
	}
	if (running)
		set_next_task(rq, p);

	check_class_changed(rq, p, prev_class, oldprio);

	/* Avoid rq from going away on us: */
	preempt_disable();
	head = splice_balance_callbacks(rq);
	task_rq_unlock(rq, p, &rf);

	if (pi) {
		if (cpuset_locked)
			cpuset_unlock();
		rt_mutex_adjust_pi(p);
	}

	/* Run balance callbacks after we've adjusted the PI chain: */
	balance_callbacks(rq, head);
	preempt_enable();

	return 0;

unlock:
	task_rq_unlock(rq, p, &rf);
	if (cpuset_locked)
		cpuset_unlock();
	return retval;
}

static int _sched_setscheduler(struct task_struct *p, int policy,
			       const struct sched_param *param, bool check)
{
	struct sched_attr attr = {
		.sched_policy   = policy,
		.sched_priority = param->sched_priority,
		.sched_nice     = PRIO_TO_NICE(p->static_prio),
	};

	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
		policy &= ~SCHED_RESET_ON_FORK;
		attr.sched_policy = policy;
	}

	return __sched_setscheduler(p, &attr, check, true);
}
/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Use sched_set_fifo(), read its comment.
 *
 * Return: 0 on success. An error code otherwise.
 *
 * NOTE that the task may be already dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
		       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, true);
}

int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, true, true);
}

int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, false, true);
}
EXPORT_SYMBOL_GPL(sched_setattr_nocheck);

/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernel-space.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission. For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 *
 * Return: 0 on success. An error code otherwise.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
			       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, false);
}

/*
 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
 * incapable of resource management, which is the one thing an OS really should
 * be doing.
 *
 * This is of course the reason it is limited to privileged users only.
 *
 * Worse still; it is fundamentally impossible to compose static priority
 * workloads. You cannot take two correctly working static prio workloads
 * and smash them together and still expect them to work.
 *
 * For this reason 'all' FIFO tasks the kernel creates are basically at:
 *
 *   MAX_RT_PRIO / 2
 *
 * The administrator _MUST_ configure the system, the kernel simply doesn't
 * know enough information to make a sensible choice.
 */
void sched_set_fifo(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo);

/*
 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
 */
void sched_set_fifo_low(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = 1 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo_low);

void sched_set_normal(struct task_struct *p, int nice)
{
	struct sched_attr attr = {
		.sched_policy = SCHED_NORMAL,
		.sched_nice = nice,
	};
	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_normal);
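
/*
 * Illustrative usage (not taken from this file): a kernel thread that needs
 * RT scheduling but should leave the actual priority choice to the kernel
 * and the administrator would typically do:
 *
 *	static int my_worker_fn(void *data)		// hypothetical
 *	{
 *		sched_set_fifo(current);
 *		while (!kthread_should_stop())
 *			do_work();			// hypothetical
 *		return 0;
 *	}
 */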

static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
	struct sched_param lparam;

	if (!param || pid < 0)
		return -EINVAL;
	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
		return -EFAULT;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	return sched_setscheduler(p, policy, &lparam);
}

/*
 * Mimics kernel/events/core.c perf_copy_attr().
 */
static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
{
	u32 size;
	int ret;

	/* Zero the full structure, so that a short copy will be nice: */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	/* ABI compatibility quirk: */
	if (!size)
		size = SCHED_ATTR_SIZE_VER0;
	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
		goto err_size;

	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
	if (ret) {
		if (ret == -E2BIG)
			goto err_size;
		return ret;
	}

	if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
	    size < SCHED_ATTR_SIZE_VER1)
		return -EINVAL;

	/*
	 * XXX: Do we want to be lenient like existing syscalls; or do we want
	 * to be strict and return an error on out-of-bounds values?
	 */
	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);

	return 0;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	return -E2BIG;
}

static void get_params(struct task_struct *p, struct sched_attr *attr)
{
	if (task_has_dl_policy(p))
		__getparam_dl(p, attr);
	else if (task_has_rt_policy(p))
		attr->sched_priority = p->rt_priority;
	else
		attr->sched_nice = task_nice(p);
}

/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
{
	if (policy < 0)
		return -EINVAL;

	return do_sched_setscheduler(pid, policy, param);
}

/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
}

/**
 * sys_sched_setattr - same as above, but with extended sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @flags: for future extension.
 */
SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
			       unsigned int, flags)
{
	struct sched_attr attr;
	int retval;

	if (!uattr || pid < 0 || flags)
		return -EINVAL;

	retval = sched_copy_attr(uattr, &attr);
	if (retval)
		return retval;

	if ((int)attr.sched_policy < 0)
		return -EINVAL;
	if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
		attr.sched_policy = SETPARAM_POLICY;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
		get_params(p, &attr);

	return sched_setattr(p, &attr);
}
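
/*
 * Illustrative user-space counterpart (not part of this file): glibc does not
 * wrap sched_setattr(), so callers typically go through syscall(2), e.g. to
 * request SCHED_DEADLINE with a 10ms runtime every 100ms period:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	// nanoseconds
 *		.sched_deadline	=  30 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *	syscall(SYS_sched_setattr, 0, &attr, 0);	// pid 0 == calling thread
 */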

/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 *
 * Return: On success, the policy of the thread. Otherwise, a negative error
 * code.
 */
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
	struct task_struct *p;
	int retval;

	if (pid < 0)
		return -EINVAL;

	guard(rcu)();
	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	retval = security_task_getscheduler(p);
	if (!retval) {
		retval = p->policy;
		if (p->sched_reset_on_fork)
			retval |= SCHED_RESET_ON_FORK;
	}
	return retval;
}

/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 *
 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
 * code.
 */
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
	struct sched_param lp = { .sched_priority = 0 };
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;

	scoped_guard (rcu) {
		p = find_process_by_pid(pid);
		if (!p)
			return -ESRCH;

		retval = security_task_getscheduler(p);
		if (retval)
			return retval;

		if (task_has_rt_policy(p))
			lp.sched_priority = p->rt_priority;
	}

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
}

/*
 * Copy the kernel size attribute structure (which might be larger
 * than what user-space knows about) to user-space.
 *
 * Note that all cases are valid: user-space buffer can be larger or
 * smaller than the kernel-space buffer. The usual case is that both
 * have the same size.
 */
static int
sched_attr_copy_to_user(struct sched_attr __user *uattr,
			struct sched_attr *kattr,
			unsigned int usize)
{
	unsigned int ksize = sizeof(*kattr);

	if (!access_ok(uattr, usize))
		return -EFAULT;

	/*
	 * sched_getattr() ABI forwards and backwards compatibility:
	 *
	 * If usize == ksize then we just copy everything to user-space and all is good.
	 *
	 * If usize < ksize then we only copy as much as user-space has space for,
	 * this keeps ABI compatibility as well. We skip the rest.
	 *
	 * If usize > ksize then user-space is using a newer version of the ABI,
	 * which part the kernel doesn't know about. Just ignore it - tooling can
	 * detect the kernel's knowledge of attributes from the attr->size value
	 * which is set to ksize in this case.
/*
 * Copy the kernel-sized attribute structure (which might be larger
 * than what user-space knows about) to user-space.
 *
 * Note that all cases are valid: the user-space buffer can be larger or
 * smaller than the kernel-space buffer. The usual case is that both
 * have the same size.
 */
static int
sched_attr_copy_to_user(struct sched_attr __user *uattr,
                        struct sched_attr *kattr,
                        unsigned int usize)
{
        unsigned int ksize = sizeof(*kattr);

        if (!access_ok(uattr, usize))
                return -EFAULT;

        /*
         * sched_getattr() ABI forwards and backwards compatibility:
         *
         * If usize == ksize then we just copy everything to user-space and all is good.
         *
         * If usize < ksize then we only copy as much as user-space has space for;
         * this keeps ABI compatibility as well. We skip the rest.
         *
         * If usize > ksize then user-space is using a newer version of the ABI,
         * parts of which this kernel does not know about. Just ignore them - tooling
         * can detect the kernel's knowledge of attributes from the attr->size value,
         * which is set to ksize in this case.
         */
        kattr->size = min(usize, ksize);

        if (copy_to_user(uattr, kattr, kattr->size))
                return -EFAULT;

        return 0;
}

/**
 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @usize: sizeof(attr) for forward/backward compatibility.
 * @flags: for future extension.
 */
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
                unsigned int, usize, unsigned int, flags)
{
        struct sched_attr kattr = { };
        struct task_struct *p;
        int retval;

        if (!uattr || pid < 0 || usize > PAGE_SIZE ||
            usize < SCHED_ATTR_SIZE_VER0 || flags)
                return -EINVAL;

        scoped_guard (rcu) {
                p = find_process_by_pid(pid);
                if (!p)
                        return -ESRCH;

                retval = security_task_getscheduler(p);
                if (retval)
                        return retval;

                kattr.sched_policy = p->policy;
                if (p->sched_reset_on_fork)
                        kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
                get_params(p, &kattr);
                kattr.sched_flags &= SCHED_FLAG_ALL;

#ifdef CONFIG_UCLAMP_TASK
                /*
                 * This could race with another potential updater, but this is fine
                 * because it'll correctly read the old or the new value. We don't need
                 * to guarantee who wins the race as long as it doesn't return garbage.
                 */
                kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
                kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
#endif
        }

        return sched_attr_copy_to_user(uattr, &kattr, usize);
}
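/*
 * Illustrative user-space sketch (not part of this file) of the size
 * handshake described in sched_attr_copy_to_user(): the caller passes the
 * size of its own buffer and the kernel reports, in attr.size, how much of
 * struct sched_attr it filled in. Reuses the hand-rolled struct mirror
 * from the sched_setattr example above (an assumption of this sketch).
 */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static void dump_attr(pid_t pid)
{
        struct sched_attr_example attr;

        memset(&attr, 0, sizeof(attr));
        if (syscall(SYS_sched_getattr, pid, &attr, sizeof(attr), 0) == -1) {
                perror("sched_getattr");
                return;
        }
        /* attr.size is now min(our buffer size, the kernel's sched_attr size) */
        printf("policy=%u nice=%d rt_prio=%u (%u bytes filled)\n",
               attr.sched_policy, attr.sched_nice, attr.sched_priority,
               attr.size);
}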
#ifdef CONFIG_SMP
int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
{
        /*
         * If the task isn't a deadline task or admission control is
         * disabled then we don't care about affinity changes.
         */
        if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
                return 0;

        /*
         * Since bandwidth control happens on a root_domain basis,
         * if the admission test is enabled, we only admit -deadline
         * tasks allowed to run on all the CPUs in the task's
         * root_domain.
         */
        guard(rcu)();
        if (!cpumask_subset(task_rq(p)->rd->span, mask))
                return -EBUSY;

        return 0;
}
#endif /* CONFIG_SMP */
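/*
 * Illustrative user-space consequence of the check above (a sketch, and it
 * assumes deadline admission control is enabled, i.e. sched_rt_runtime_us
 * is not -1): once a thread runs as SCHED_DEADLINE, restricting it to a
 * subset of its root domain's CPUs is refused with EBUSY.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>

static void try_to_pin_deadline_task(pid_t pid)
{
        cpu_set_t set;

        CPU_ZERO(&set);
        CPU_SET(0, &set);       /* attempt to restrict the task to CPU 0 */

        if (sched_setaffinity(pid, sizeof(set), &set) == -1 && errno == EBUSY)
                fprintf(stderr,
                        "-deadline task must keep its whole root domain\n");
}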
int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
{
        int retval;
        cpumask_var_t cpus_allowed, new_mask;

        if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
                return -ENOMEM;

        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
                retval = -ENOMEM;
                goto out_free_cpus_allowed;
        }

        cpuset_cpus_allowed(p, cpus_allowed);
        cpumask_and(new_mask, ctx->new_mask, cpus_allowed);

        ctx->new_mask = new_mask;
        ctx->flags |= SCA_CHECK;

        retval = dl_task_check_affinity(p, new_mask);
        if (retval)
                goto out_free_new_mask;

        retval = __set_cpus_allowed_ptr(p, ctx);
        if (retval)
                goto out_free_new_mask;

        cpuset_cpus_allowed(p, cpus_allowed);
        if (!cpumask_subset(new_mask, cpus_allowed)) {
                /*
                 * We must have raced with a concurrent cpuset update.
                 * Just reset the cpumask to the cpuset's cpus_allowed.
                 */
                cpumask_copy(new_mask, cpus_allowed);

                /*
                 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
                 * will restore the previous user_cpus_ptr value.
                 *
                 * In the unlikely event a previous user_cpus_ptr exists,
                 * we need to further restrict the mask to what is allowed
                 * by that old user_cpus_ptr.
                 */
                if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
                        bool empty = !cpumask_and(new_mask, new_mask,
                                                  ctx->user_mask);

                        if (WARN_ON_ONCE(empty))
                                cpumask_copy(new_mask, cpus_allowed);
                }
                __set_cpus_allowed_ptr(p, ctx);
                retval = -EINVAL;
        }

out_free_new_mask:
        free_cpumask_var(new_mask);
out_free_cpus_allowed:
        free_cpumask_var(cpus_allowed);
        return retval;
}

long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
        struct affinity_context ac;
        struct cpumask *user_mask;
        int retval;

        CLASS(find_get_task, p)(pid);
        if (!p)
                return -ESRCH;

        if (p->flags & PF_NO_SETAFFINITY)
                return -EINVAL;

        if (!check_same_owner(p)) {
                guard(rcu)();
                if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
                        return -EPERM;
        }

        retval = security_task_setscheduler(p);
        if (retval)
                return retval;

        /*
         * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
         * alloc_user_cpus_ptr() returns NULL.
         */
        user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
        if (user_mask) {
                cpumask_copy(user_mask, in_mask);
        } else if (IS_ENABLED(CONFIG_SMP)) {
                return -ENOMEM;
        }

        ac = (struct affinity_context){
                .new_mask  = in_mask,
                .user_mask = user_mask,
                .flags     = SCA_USER,
        };

        retval = __sched_setaffinity(p, &ac);
        kfree(ac.user_mask);

        return retval;
}

static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
                             struct cpumask *new_mask)
{
        if (len < cpumask_size())
                cpumask_clear(new_mask);
        else if (len > cpumask_size())
                len = cpumask_size();

        return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}
/**
 * sys_sched_setaffinity - set the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new CPU mask
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
                unsigned long __user *, user_mask_ptr)
{
        cpumask_var_t new_mask;
        int retval;

        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
                return -ENOMEM;

        retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
        if (retval == 0)
                retval = sched_setaffinity(pid, new_mask);
        free_cpumask_var(new_mask);
        return retval;
}
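/*
 * Illustrative user-space counterpart (not part of this file): the glibc
 * sched_setaffinity(2) wrapper passes sizeof(cpu_set_t) as @len and a
 * cpu_set_t bitmask as @user_mask_ptr.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int run_on_cpus_0_and_1(pid_t pid)
{
        cpu_set_t set;

        CPU_ZERO(&set);
        CPU_SET(0, &set);
        CPU_SET(1, &set);

        if (sched_setaffinity(pid, sizeof(set), &set) == -1) {
                perror("sched_setaffinity");
                return -1;
        }
        return 0;
}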
long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
        struct task_struct *p;
        int retval;

        guard(rcu)();
        p = find_process_by_pid(pid);
        if (!p)
                return -ESRCH;

        retval = security_task_getscheduler(p);
        if (retval)
                return retval;

        guard(raw_spinlock_irqsave)(&p->pi_lock);
        cpumask_and(mask, &p->cpus_mask, cpu_active_mask);

        return 0;
}

/**
 * sys_sched_getaffinity - get the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current CPU mask
 *
 * Return: size of CPU mask copied to user_mask_ptr on success. An
 * error code otherwise.
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
                unsigned long __user *, user_mask_ptr)
{
        int ret;
        cpumask_var_t mask;

        if ((len * BITS_PER_BYTE) < nr_cpu_ids)
                return -EINVAL;
        if (len & (sizeof(unsigned long)-1))
                return -EINVAL;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        ret = sched_getaffinity(pid, mask);
        if (ret == 0) {
                unsigned int retlen = min(len, cpumask_size());

                if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
                        ret = -EFAULT;
                else
                        ret = retlen;
        }
        free_cpumask_var(mask);

        return ret;
}
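/*
 * Illustrative user-space sketch (not part of this file): the raw syscall
 * returns the number of bytes copied, as documented above, while the glibc
 * sched_getaffinity(2) wrapper hides that and returns 0 on success.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

static void print_affinity(pid_t pid)
{
        cpu_set_t set;
        long nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
        int cpu;

        if (sched_getaffinity(pid, sizeof(set), &set) == -1) {
                perror("sched_getaffinity");
                return;
        }
        for (cpu = 0; cpu < nr_cpus; cpu++)
                if (CPU_ISSET(cpu, &set))
                        printf("CPU%d ", cpu);
        printf("\n");
}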
static void do_sched_yield(void)
{
        struct rq_flags rf;
        struct rq *rq;

        rq = this_rq_lock_irq(&rf);

        schedstat_inc(rq->yld_count);
        current->sched_class->yield_task(rq);

        preempt_disable();
        rq_unlock_irq(rq, &rf);
        sched_preempt_enable_no_resched();

        schedule();
}

/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 *
 * Return: 0.
 */
SYSCALL_DEFINE0(sched_yield)
{
        do_sched_yield();
        return 0;
}

/**
 * yield - yield the current processor to other threads.
 *
 * Do not ever use this function: there's a 99% chance you're doing it wrong.
 *
 * The scheduler is at all times free to pick the calling task as the most
 * eligible task to run; if removing the yield() call from your code breaks
 * it, it's already broken.
 *
 * Typical broken usage is:
 *
 * while (!event)
 *      yield();
 *
 * where one assumes that yield() will let 'the other' process run and make
 * event true. If the current task is a SCHED_FIFO task, that will never
 * happen. Never use yield() as a progress guarantee!!
 *
 * If you want to use yield() to wait for something, use wait_event().
 * If you want to use yield() to be 'nice' for others, use cond_resched().
 * If you still want to use yield(), do not!
 */
void __sched yield(void)
{
        set_current_state(TASK_RUNNING);
        do_sched_yield();
}
EXPORT_SYMBOL(yield);
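/*
 * A minimal kernel-side sketch of the advice above, with hypothetical
 * names (needs <linux/wait.h>): the waiter sleeps on a waitqueue instead
 * of polling in a yield() loop, and the producer wakes it once the
 * condition is true.
 */
static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_event;

static void example_wait_for_event(void)
{
        /* sleeps until example_event becomes true; no yield() polling */
        wait_event(example_wq, example_event);
}

static void example_signal_event(void)
{
        example_event = true;
        wake_up(&example_wq);
}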
/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 * @p: target task
 * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Return:
 *      true (>0) if we indeed boosted the target task.
 *      false (0) if we failed to boost the target.
 *      -ESRCH if there's no task to yield to.
 */
int __sched yield_to(struct task_struct *p, bool preempt)
{
        struct task_struct *curr = current;
        struct rq *rq, *p_rq;
        int yielded = 0;

        scoped_guard (irqsave) {
                rq = this_rq();

again:
                p_rq = task_rq(p);
                /*
                 * If we're the only runnable task on the rq and target rq also
                 * has only one task, there's absolutely no point in yielding.
                 */
                if (rq->nr_running == 1 && p_rq->nr_running == 1)
                        return -ESRCH;

                guard(double_rq_lock)(rq, p_rq);
                if (task_rq(p) != p_rq)
                        goto again;

                if (!curr->sched_class->yield_to_task)
                        return 0;

                if (curr->sched_class != p->sched_class)
                        return 0;

                if (task_on_cpu(p_rq, p) || !task_is_running(p))
                        return 0;

                yielded = curr->sched_class->yield_to_task(rq, p);
                if (yielded) {
                        schedstat_inc(rq->yld_count);
                        /*
                         * Make p's CPU reschedule; pick_next_entity
                         * takes care of fairness.
                         */
                        if (preempt && rq != p_rq)
                                resched_curr(p_rq);
                }
        }

        if (yielded)
                schedule();

        return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);
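/*
 * A minimal sketch of a directed yield, with hypothetical names: a module
 * that already holds a reference on a target task (for instance a lock
 * holder it is spinning on, which is roughly what KVM's vCPU spin-loop
 * handling does) can hand it the remainder of its slice.
 */
static bool example_directed_yield(struct task_struct *target)
{
        /* @target must be pinned by the caller, e.g. via get_task_struct() */
        int ret = yield_to(target, false);

        /* >0: we yielded and boosted @target; 0 or -ESRCH: nothing to do */
        return ret > 0;
}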
/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the maximum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
        int ret = -EINVAL;

        switch (policy) {
        case SCHED_FIFO:
        case SCHED_RR:
                ret = MAX_RT_PRIO-1;
                break;
        case SCHED_DEADLINE:
        case SCHED_NORMAL:
        case SCHED_BATCH:
        case SCHED_IDLE:
        case SCHED_EXT:
                ret = 0;
                break;
        }
        return ret;
}

/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the minimum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
        int ret = -EINVAL;

        switch (policy) {
        case SCHED_FIFO:
        case SCHED_RR:
                ret = 1;
                break;
        case SCHED_DEADLINE:
        case SCHED_NORMAL:
        case SCHED_BATCH:
        case SCHED_IDLE:
        case SCHED_EXT:
                ret = 0;
        }
        return ret;
}
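/*
 * Illustrative user-space usage of the pair above (not part of this file):
 * query the valid static priority range for a policy before calling
 * sched_setscheduler(2).
 */
#include <sched.h>
#include <stdio.h>

static int set_fifo_mid_priority(pid_t pid)
{
        int lo = sched_get_priority_min(SCHED_FIFO);
        int hi = sched_get_priority_max(SCHED_FIFO);
        struct sched_param sp;

        if (lo == -1 || hi == -1)
                return -1;

        sp.sched_priority = lo + (hi - lo) / 2;
        if (sched_setscheduler(pid, SCHED_FIFO, &sp) == -1) {
                perror("sched_setscheduler");
                return -1;
        }
        return 0;
}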
static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
{
        unsigned int time_slice = 0;
        int retval;

        if (pid < 0)
                return -EINVAL;

        scoped_guard (rcu) {
                struct task_struct *p = find_process_by_pid(pid);
                if (!p)
                        return -ESRCH;

                retval = security_task_getscheduler(p);
                if (retval)
                        return retval;

                scoped_guard (task_rq_lock, p) {
                        struct rq *rq = scope.rq;
                        if (p->sched_class->get_rr_interval)
                                time_slice = p->sched_class->get_rr_interval(rq, p);
                }
        }

        jiffies_to_timespec64(time_slice, t);
        return 0;
}

/**
 * sys_sched_rr_get_interval - return the default time-slice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the time-slice value.
 *
 * This syscall writes the default time-slice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 *
 * Return: On success, 0 and the time-slice is in @interval. Otherwise,
 * an error code.
 */
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
                struct __kernel_timespec __user *, interval)
{
        struct timespec64 t;
        int retval = sched_rr_get_interval(pid, &t);

        if (retval == 0)
                retval = put_timespec64(&t, interval);

        return retval;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
                struct old_timespec32 __user *, interval)
{
        struct timespec64 t;
        int retval = sched_rr_get_interval(pid, &t);

        if (retval == 0)
                retval = put_old_timespec32(&t, interval);
        return retval;
}
#endif
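/*
 * Illustrative user-space sketch (not part of this file): the glibc
 * sched_rr_get_interval(2) wrapper fills a struct timespec; per the
 * comment above, an all-zero result means the task has no finite
 * round-robin time-slice (e.g. SCHED_FIFO).
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

static void print_rr_interval(pid_t pid)
{
        struct timespec ts;

        if (sched_rr_get_interval(pid, &ts) == -1) {
                perror("sched_rr_get_interval");
                return;
        }
        printf("time-slice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
}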