xref: /linux/kernel/sched/syscalls.c (revision 857b158dc5e81c6de795ef6be006eed146098fc6)
104746ed8SIngo Molnar // SPDX-License-Identifier: GPL-2.0-only
204746ed8SIngo Molnar /*
304746ed8SIngo Molnar  *  kernel/sched/syscalls.c
404746ed8SIngo Molnar  *
504746ed8SIngo Molnar  *  Core kernel scheduler syscalls related code
604746ed8SIngo Molnar  *
704746ed8SIngo Molnar  *  Copyright (C) 1991-2002  Linus Torvalds
804746ed8SIngo Molnar  *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
904746ed8SIngo Molnar  */
1004746ed8SIngo Molnar #include <linux/sched.h>
1104746ed8SIngo Molnar #include <linux/cpuset.h>
1204746ed8SIngo Molnar #include <linux/sched/debug.h>
1304746ed8SIngo Molnar 
1404746ed8SIngo Molnar #include <uapi/linux/sched/types.h>
1504746ed8SIngo Molnar 
1604746ed8SIngo Molnar #include "sched.h"
1704746ed8SIngo Molnar #include "autogroup.h"
1804746ed8SIngo Molnar 
1904746ed8SIngo Molnar static inline int __normal_prio(int policy, int rt_prio, int nice)
2004746ed8SIngo Molnar {
2104746ed8SIngo Molnar 	int prio;
2204746ed8SIngo Molnar 
2304746ed8SIngo Molnar 	if (dl_policy(policy))
2404746ed8SIngo Molnar 		prio = MAX_DL_PRIO - 1;
2504746ed8SIngo Molnar 	else if (rt_policy(policy))
2604746ed8SIngo Molnar 		prio = MAX_RT_PRIO - 1 - rt_prio;
2704746ed8SIngo Molnar 	else
2804746ed8SIngo Molnar 		prio = NICE_TO_PRIO(nice);
2904746ed8SIngo Molnar 
3004746ed8SIngo Molnar 	return prio;
3104746ed8SIngo Molnar }
3204746ed8SIngo Molnar 
3304746ed8SIngo Molnar /*
3404746ed8SIngo Molnar  * Calculate the expected normal priority: i.e. priority
3504746ed8SIngo Molnar  * without taking RT-inheritance into account. Might be
3604746ed8SIngo Molnar  * boosted by interactivity modifiers. Changes upon fork,
3704746ed8SIngo Molnar  * setprio syscalls, and whenever the interactivity
3804746ed8SIngo Molnar  * estimator recalculates.
3904746ed8SIngo Molnar  */
4004746ed8SIngo Molnar static inline int normal_prio(struct task_struct *p)
4104746ed8SIngo Molnar {
4204746ed8SIngo Molnar 	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
4304746ed8SIngo Molnar }
4404746ed8SIngo Molnar 
4504746ed8SIngo Molnar /*
4604746ed8SIngo Molnar  * Calculate the current priority, i.e. the priority
4704746ed8SIngo Molnar  * taken into account by the scheduler. This value might
4804746ed8SIngo Molnar  * be boosted by RT tasks, or might be boosted by
4904746ed8SIngo Molnar  * interactivity modifiers. Will be RT if the task got
5004746ed8SIngo Molnar  * RT-boosted. If not then it returns p->normal_prio.
5104746ed8SIngo Molnar  */
5204746ed8SIngo Molnar static int effective_prio(struct task_struct *p)
5304746ed8SIngo Molnar {
5404746ed8SIngo Molnar 	p->normal_prio = normal_prio(p);
5504746ed8SIngo Molnar 	/*
5604746ed8SIngo Molnar 	 * If we are RT tasks or we were boosted to RT priority,
5704746ed8SIngo Molnar 	 * keep the priority unchanged. Otherwise, update priority
5804746ed8SIngo Molnar 	 * to the normal priority:
5904746ed8SIngo Molnar 	 */
60ae04f69dSQais Yousef 	if (!rt_or_dl_prio(p->prio))
6104746ed8SIngo Molnar 		return p->normal_prio;
6204746ed8SIngo Molnar 	return p->prio;
6304746ed8SIngo Molnar }
6404746ed8SIngo Molnar 
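/*
 * A worked example of the mapping above (an illustrative sketch; the
 * constants are restated from the priority headers, where MAX_RT_PRIO is
 * 100, MAX_DL_PRIO is 0 and NICE_TO_PRIO(nice) is nice + 120):
 *
 *	nice  -20  ->  kernel prio 100   (highest fair priority)
 *	nice    0  ->  kernel prio 120   (DEFAULT_PRIO)
 *	nice  +19  ->  kernel prio 139   (lowest fair priority)
 *	SCHED_FIFO/SCHED_RR rt_priority 1..99  ->  kernel prio 98..0
 *	SCHED_DEADLINE (any parameters)        ->  kernel prio -1
 */
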
6504746ed8SIngo Molnar void set_user_nice(struct task_struct *p, long nice)
6604746ed8SIngo Molnar {
6704746ed8SIngo Molnar 	bool queued, running;
6804746ed8SIngo Molnar 	struct rq *rq;
6904746ed8SIngo Molnar 	int old_prio;
7004746ed8SIngo Molnar 
7104746ed8SIngo Molnar 	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
7204746ed8SIngo Molnar 		return;
7304746ed8SIngo Molnar 	/*
7404746ed8SIngo Molnar 	 * We have to be careful, if called from sys_setpriority(),
7504746ed8SIngo Molnar 	 * the task might be in the middle of scheduling on another CPU.
7604746ed8SIngo Molnar 	 */
7704746ed8SIngo Molnar 	CLASS(task_rq_lock, rq_guard)(p);
7804746ed8SIngo Molnar 	rq = rq_guard.rq;
7904746ed8SIngo Molnar 
8004746ed8SIngo Molnar 	update_rq_clock(rq);
8104746ed8SIngo Molnar 
8204746ed8SIngo Molnar 	/*
8304746ed8SIngo Molnar 	 * The RT priorities are set via sched_setscheduler(), but we still
8404746ed8SIngo Molnar 	 * allow the 'normal' nice value to be set - but as expected
8504746ed8SIngo Molnar 	 * it won't have any effect on scheduling until the task is
8604746ed8SIngo Molnar 	 * it won't have any effect on scheduling as long as the task
8704746ed8SIngo Molnar 	 * remains SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
8804746ed8SIngo Molnar 	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
8904746ed8SIngo Molnar 		p->static_prio = NICE_TO_PRIO(nice);
9004746ed8SIngo Molnar 		return;
9104746ed8SIngo Molnar 	}
9204746ed8SIngo Molnar 
9304746ed8SIngo Molnar 	queued = task_on_rq_queued(p);
9404746ed8SIngo Molnar 	running = task_current(rq, p);
9504746ed8SIngo Molnar 	if (queued)
9604746ed8SIngo Molnar 		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
9704746ed8SIngo Molnar 	if (running)
9804746ed8SIngo Molnar 		put_prev_task(rq, p);
9904746ed8SIngo Molnar 
10004746ed8SIngo Molnar 	p->static_prio = NICE_TO_PRIO(nice);
10104746ed8SIngo Molnar 	set_load_weight(p, true);
10204746ed8SIngo Molnar 	old_prio = p->prio;
10304746ed8SIngo Molnar 	p->prio = effective_prio(p);
10404746ed8SIngo Molnar 
10504746ed8SIngo Molnar 	if (queued)
10604746ed8SIngo Molnar 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
10704746ed8SIngo Molnar 	if (running)
10804746ed8SIngo Molnar 		set_next_task(rq, p);
10904746ed8SIngo Molnar 
11004746ed8SIngo Molnar 	/*
11104746ed8SIngo Molnar 	 * If the task increased its priority or is running and
11204746ed8SIngo Molnar 	 * lowered its priority, then reschedule its CPU:
11304746ed8SIngo Molnar 	 */
11404746ed8SIngo Molnar 	p->sched_class->prio_changed(rq, p, old_prio);
11504746ed8SIngo Molnar }
11604746ed8SIngo Molnar EXPORT_SYMBOL(set_user_nice);
11704746ed8SIngo Molnar 
11804746ed8SIngo Molnar /*
11904746ed8SIngo Molnar  * is_nice_reduction - check if a nice value is an actual reduction
12004746ed8SIngo Molnar  *
12104746ed8SIngo Molnar  * Similar to can_nice() but does not perform a capability check.
12204746ed8SIngo Molnar  *
12304746ed8SIngo Molnar  * @p: task
12404746ed8SIngo Molnar  * @nice: nice value
12504746ed8SIngo Molnar  */
12604746ed8SIngo Molnar static bool is_nice_reduction(const struct task_struct *p, const int nice)
12704746ed8SIngo Molnar {
12804746ed8SIngo Molnar 	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
12904746ed8SIngo Molnar 	int nice_rlim = nice_to_rlimit(nice);
13004746ed8SIngo Molnar 
13104746ed8SIngo Molnar 	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
13204746ed8SIngo Molnar }
13304746ed8SIngo Molnar 
13404746ed8SIngo Molnar /*
13504746ed8SIngo Molnar  * can_nice - check if a task can reduce its nice value
13604746ed8SIngo Molnar  * @p: task
13704746ed8SIngo Molnar  * @nice: nice value
13804746ed8SIngo Molnar  */
13904746ed8SIngo Molnar int can_nice(const struct task_struct *p, const int nice)
14004746ed8SIngo Molnar {
14104746ed8SIngo Molnar 	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
14204746ed8SIngo Molnar }
14304746ed8SIngo Molnar 
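/*
 * Userspace sketch (illustrative, not part of this file) of the rlimit
 * check performed by is_nice_reduction()/can_nice(): RLIMIT_NICE uses the
 * 1..40 range, and nice_to_rlimit(nice) is 20 - nice. CAP_SYS_NICE bypasses
 * this check entirely. The helper name below is made up; it assumes the
 * glibc getrlimit() wrapper:
 *
 *	#include <sys/resource.h>
 *
 *	int nice_is_allowed_unprivileged(int nice)
 *	{
 *		struct rlimit rl;
 *
 *		if (getrlimit(RLIMIT_NICE, &rl))
 *			return 0;
 *		return (rlim_t)(20 - nice) <= rl.rlim_cur;
 *	}
 *
 * Raising the nice value (lowering priority) is always permitted; the check
 * above only matters when reducing it.
 */
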
14404746ed8SIngo Molnar #ifdef __ARCH_WANT_SYS_NICE
14504746ed8SIngo Molnar 
14604746ed8SIngo Molnar /*
14704746ed8SIngo Molnar  * sys_nice - change the priority of the current process.
14804746ed8SIngo Molnar  * @increment: priority increment
14904746ed8SIngo Molnar  *
15004746ed8SIngo Molnar  * sys_setpriority is a more generic, but much slower function that
15104746ed8SIngo Molnar  * does similar things.
15204746ed8SIngo Molnar  */
15304746ed8SIngo Molnar SYSCALL_DEFINE1(nice, int, increment)
15404746ed8SIngo Molnar {
15504746ed8SIngo Molnar 	long nice, retval;
15604746ed8SIngo Molnar 
15704746ed8SIngo Molnar 	/*
15804746ed8SIngo Molnar 	 * Setpriority might change our priority at the same moment.
15904746ed8SIngo Molnar 	 * We don't have to worry. Conceptually one call occurs first
16004746ed8SIngo Molnar 	 * and we have a single winner.
16104746ed8SIngo Molnar 	 */
16204746ed8SIngo Molnar 	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
16304746ed8SIngo Molnar 	nice = task_nice(current) + increment;
16404746ed8SIngo Molnar 
16504746ed8SIngo Molnar 	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
16604746ed8SIngo Molnar 	if (increment < 0 && !can_nice(current, nice))
16704746ed8SIngo Molnar 		return -EPERM;
16804746ed8SIngo Molnar 
16904746ed8SIngo Molnar 	retval = security_task_setnice(current, nice);
17004746ed8SIngo Molnar 	if (retval)
17104746ed8SIngo Molnar 		return retval;
17204746ed8SIngo Molnar 
17304746ed8SIngo Molnar 	set_user_nice(current, nice);
17404746ed8SIngo Molnar 	return 0;
17504746ed8SIngo Molnar }
17604746ed8SIngo Molnar 
17704746ed8SIngo Molnar #endif
17804746ed8SIngo Molnar 
17904746ed8SIngo Molnar /**
18004746ed8SIngo Molnar  * task_prio - return the priority value of a given task.
18104746ed8SIngo Molnar  * @p: the task in question.
18204746ed8SIngo Molnar  *
18304746ed8SIngo Molnar  * Return: The priority value as seen by users in /proc.
18404746ed8SIngo Molnar  *
18504746ed8SIngo Molnar  * sched policy         return value   kernel prio    user prio/nice
18604746ed8SIngo Molnar  *
18704746ed8SIngo Molnar  * normal, batch, idle     [0 ... 39]  [100 ... 139]          0/[-20 ... 19]
18804746ed8SIngo Molnar  * fifo, rr             [-2 ... -100]     [98 ... 0]  [1 ... 99]
18904746ed8SIngo Molnar  * deadline                     -101             -1           0
19004746ed8SIngo Molnar  */
19104746ed8SIngo Molnar int task_prio(const struct task_struct *p)
19204746ed8SIngo Molnar {
19304746ed8SIngo Molnar 	return p->prio - MAX_RT_PRIO;
19404746ed8SIngo Molnar }
19504746ed8SIngo Molnar 
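/*
 * Userspace sketch (illustrative only) reproducing the table above for the
 * calling thread, using the glibc sched_getscheduler()/sched_getparam()/
 * getpriority() wrappers; SCHED_DEADLINE may need <linux/sched.h> on older
 * C libraries, and the helper name is made up:
 *
 *	#include <sched.h>
 *	#include <sys/resource.h>
 *
 *	int expected_proc_prio(void)
 *	{
 *		struct sched_param sp;
 *		int policy = sched_getscheduler(0);
 *
 *		sched_getparam(0, &sp);
 *		if (policy == SCHED_FIFO || policy == SCHED_RR)
 *			return -1 - sp.sched_priority;
 *		if (policy == SCHED_DEADLINE)
 *			return -101;
 *		return 20 + getpriority(PRIO_PROCESS, 0);
 *	}
 */
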
19604746ed8SIngo Molnar /**
19704746ed8SIngo Molnar  * idle_cpu - is a given CPU idle currently?
19804746ed8SIngo Molnar  * @cpu: the processor in question.
19904746ed8SIngo Molnar  *
20004746ed8SIngo Molnar  * Return: 1 if the CPU is currently idle. 0 otherwise.
20104746ed8SIngo Molnar  */
20204746ed8SIngo Molnar int idle_cpu(int cpu)
20304746ed8SIngo Molnar {
20404746ed8SIngo Molnar 	struct rq *rq = cpu_rq(cpu);
20504746ed8SIngo Molnar 
20604746ed8SIngo Molnar 	if (rq->curr != rq->idle)
20704746ed8SIngo Molnar 		return 0;
20804746ed8SIngo Molnar 
20904746ed8SIngo Molnar 	if (rq->nr_running)
21004746ed8SIngo Molnar 		return 0;
21104746ed8SIngo Molnar 
21204746ed8SIngo Molnar #ifdef CONFIG_SMP
21304746ed8SIngo Molnar 	if (rq->ttwu_pending)
21404746ed8SIngo Molnar 		return 0;
21504746ed8SIngo Molnar #endif
21604746ed8SIngo Molnar 
21704746ed8SIngo Molnar 	return 1;
21804746ed8SIngo Molnar }
21904746ed8SIngo Molnar 
22004746ed8SIngo Molnar /**
22104746ed8SIngo Molnar  * available_idle_cpu - is a given CPU idle for enqueuing work?
22204746ed8SIngo Molnar  * @cpu: the CPU in question.
22304746ed8SIngo Molnar  *
22404746ed8SIngo Molnar  * Return: 1 if the CPU is currently idle. 0 otherwise.
22504746ed8SIngo Molnar  */
22604746ed8SIngo Molnar int available_idle_cpu(int cpu)
22704746ed8SIngo Molnar {
22804746ed8SIngo Molnar 	if (!idle_cpu(cpu))
22904746ed8SIngo Molnar 		return 0;
23004746ed8SIngo Molnar 
23104746ed8SIngo Molnar 	if (vcpu_is_preempted(cpu))
23204746ed8SIngo Molnar 		return 0;
23304746ed8SIngo Molnar 
23404746ed8SIngo Molnar 	return 1;
23504746ed8SIngo Molnar }
23604746ed8SIngo Molnar 
23704746ed8SIngo Molnar /**
23804746ed8SIngo Molnar  * idle_task - return the idle task for a given CPU.
23904746ed8SIngo Molnar  * @cpu: the processor in question.
24004746ed8SIngo Molnar  *
24104746ed8SIngo Molnar  * Return: The idle task for the CPU @cpu.
24204746ed8SIngo Molnar  */
24304746ed8SIngo Molnar struct task_struct *idle_task(int cpu)
24404746ed8SIngo Molnar {
24504746ed8SIngo Molnar 	return cpu_rq(cpu)->idle;
24604746ed8SIngo Molnar }
24704746ed8SIngo Molnar 
24804746ed8SIngo Molnar #ifdef CONFIG_SCHED_CORE
24904746ed8SIngo Molnar int sched_core_idle_cpu(int cpu)
25004746ed8SIngo Molnar {
25104746ed8SIngo Molnar 	struct rq *rq = cpu_rq(cpu);
25204746ed8SIngo Molnar 
25304746ed8SIngo Molnar 	if (sched_core_enabled(rq) && rq->curr == rq->idle)
25404746ed8SIngo Molnar 		return 1;
25504746ed8SIngo Molnar 
25604746ed8SIngo Molnar 	return idle_cpu(cpu);
25704746ed8SIngo Molnar }
25804746ed8SIngo Molnar 
25904746ed8SIngo Molnar #endif
26004746ed8SIngo Molnar 
26104746ed8SIngo Molnar #ifdef CONFIG_SMP
26204746ed8SIngo Molnar /*
26304746ed8SIngo Molnar  * This function computes an effective utilization for the given CPU, to be
26404746ed8SIngo Molnar  * used for frequency selection given the linear relation: f = u * f_max.
26504746ed8SIngo Molnar  *
26604746ed8SIngo Molnar  * The scheduler tracks the following metrics:
26704746ed8SIngo Molnar  *
26804746ed8SIngo Molnar  *   cpu_util_{cfs,rt,dl,irq}()
26904746ed8SIngo Molnar  *   cpu_bw_dl()
27004746ed8SIngo Molnar  *
27104746ed8SIngo Molnar  * Where the cfs,rt and dl util numbers are tracked with the same metric and
27204746ed8SIngo Molnar  * synchronized windows and are thus directly comparable.
27304746ed8SIngo Molnar  *
27404746ed8SIngo Molnar  * The cfs,rt,dl utilization are the running times measured with rq->clock_task
27504746ed8SIngo Molnar  * which excludes things like IRQ and steal-time. These latter are then accrued
276402de7fcSIngo Molnar  * in the IRQ utilization.
27704746ed8SIngo Molnar  *
278402de7fcSIngo Molnar  * The DL bandwidth number OTOH is not a measured metric but a value computed
27904746ed8SIngo Molnar  * based on the task model parameters and gives the minimal utilization
28004746ed8SIngo Molnar  * required to meet deadlines.
28104746ed8SIngo Molnar  */
28204746ed8SIngo Molnar unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
28304746ed8SIngo Molnar 				 unsigned long *min,
28404746ed8SIngo Molnar 				 unsigned long *max)
28504746ed8SIngo Molnar {
28604746ed8SIngo Molnar 	unsigned long util, irq, scale;
28704746ed8SIngo Molnar 	struct rq *rq = cpu_rq(cpu);
28804746ed8SIngo Molnar 
28904746ed8SIngo Molnar 	scale = arch_scale_cpu_capacity(cpu);
29004746ed8SIngo Molnar 
29104746ed8SIngo Molnar 	/*
29204746ed8SIngo Molnar 	 * Early check to see if IRQ/steal time saturates the CPU; this can
29304746ed8SIngo Molnar 	 * happen because of inaccuracies in how we track these -- see
29404746ed8SIngo Molnar 	 * update_irq_load_avg().
29504746ed8SIngo Molnar 	 */
29604746ed8SIngo Molnar 	irq = cpu_util_irq(rq);
29704746ed8SIngo Molnar 	if (unlikely(irq >= scale)) {
29804746ed8SIngo Molnar 		if (min)
29904746ed8SIngo Molnar 			*min = scale;
30004746ed8SIngo Molnar 		if (max)
30104746ed8SIngo Molnar 			*max = scale;
30204746ed8SIngo Molnar 		return scale;
30304746ed8SIngo Molnar 	}
30404746ed8SIngo Molnar 
30504746ed8SIngo Molnar 	if (min) {
30604746ed8SIngo Molnar 		/*
30704746ed8SIngo Molnar 		 * The minimum utilization returns the highest level between:
30804746ed8SIngo Molnar 		 * - the computed DL bandwidth needed with the IRQ pressure which
30904746ed8SIngo Molnar 		 *   steals time from the deadline task.
31004746ed8SIngo Molnar 		 * - The minimum performance requirement for CFS and/or RT.
31104746ed8SIngo Molnar 		 */
31204746ed8SIngo Molnar 		*min = max(irq + cpu_bw_dl(rq), uclamp_rq_get(rq, UCLAMP_MIN));
31304746ed8SIngo Molnar 
31404746ed8SIngo Molnar 		/*
31504746ed8SIngo Molnar 		 * When an RT task is runnable and uclamp is not used, we must
31604746ed8SIngo Molnar 		 * ensure that the task will run at maximum compute capacity.
31704746ed8SIngo Molnar 		 */
31804746ed8SIngo Molnar 		if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt))
31904746ed8SIngo Molnar 			*min = max(*min, scale);
32004746ed8SIngo Molnar 	}
32104746ed8SIngo Molnar 
32204746ed8SIngo Molnar 	/*
32304746ed8SIngo Molnar 	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
32404746ed8SIngo Molnar 	 * CFS tasks and we use the same metric to track the effective
32504746ed8SIngo Molnar 	 * utilization (PELT windows are synchronized) we can directly add them
32604746ed8SIngo Molnar 	 * to obtain the CPU's actual utilization.
32704746ed8SIngo Molnar 	 */
32804746ed8SIngo Molnar 	util = util_cfs + cpu_util_rt(rq);
32904746ed8SIngo Molnar 	util += cpu_util_dl(rq);
33004746ed8SIngo Molnar 
33104746ed8SIngo Molnar 	/*
33204746ed8SIngo Molnar 	 * The maximum hint is a soft bandwidth requirement, which can be lower
33304746ed8SIngo Molnar 	 * than the actual utilization because of uclamp_max requirements.
33404746ed8SIngo Molnar 	 */
33504746ed8SIngo Molnar 	if (max)
33604746ed8SIngo Molnar 		*max = min(scale, uclamp_rq_get(rq, UCLAMP_MAX));
33704746ed8SIngo Molnar 
33804746ed8SIngo Molnar 	if (util >= scale)
33904746ed8SIngo Molnar 		return scale;
34004746ed8SIngo Molnar 
34104746ed8SIngo Molnar 	/*
34204746ed8SIngo Molnar 	 * There is still idle time; further improve the number by using the
343402de7fcSIngo Molnar 	 * IRQ metric. Because IRQ/steal time is hidden from the task clock we
34404746ed8SIngo Molnar 	 * need to scale the task numbers:
34504746ed8SIngo Molnar 	 *
34604746ed8SIngo Molnar 	 *              max - irq
34704746ed8SIngo Molnar 	 *   U' = irq + --------- * U
34804746ed8SIngo Molnar 	 *                 max
34904746ed8SIngo Molnar 	 */
35004746ed8SIngo Molnar 	util = scale_irq_capacity(util, irq, scale);
35104746ed8SIngo Molnar 	util += irq;
35204746ed8SIngo Molnar 
35304746ed8SIngo Molnar 	return min(scale, util);
35404746ed8SIngo Molnar }
35504746ed8SIngo Molnar 
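/*
 * Worked example of the scaling above (illustrative numbers only): with
 * scale = 1024, irq = 128 and util = 512 (CFS + RT + DL running time),
 * scale_irq_capacity() gives 512 * (1024 - 128) / 1024 = 448, so the
 * effective utilization reported is 448 + 128 = 576 out of 1024.
 */
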
35604746ed8SIngo Molnar unsigned long sched_cpu_util(int cpu)
35704746ed8SIngo Molnar {
35804746ed8SIngo Molnar 	return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL);
35904746ed8SIngo Molnar }
36004746ed8SIngo Molnar #endif /* CONFIG_SMP */
36104746ed8SIngo Molnar 
36204746ed8SIngo Molnar /**
36304746ed8SIngo Molnar  * find_process_by_pid - find a process with a matching PID value.
36404746ed8SIngo Molnar  * @pid: the pid in question.
36504746ed8SIngo Molnar  *
36604746ed8SIngo Molnar  * The task of @pid, if found. %NULL otherwise.
36704746ed8SIngo Molnar  */
36804746ed8SIngo Molnar static struct task_struct *find_process_by_pid(pid_t pid)
36904746ed8SIngo Molnar {
37004746ed8SIngo Molnar 	return pid ? find_task_by_vpid(pid) : current;
37104746ed8SIngo Molnar }
37204746ed8SIngo Molnar 
37304746ed8SIngo Molnar static struct task_struct *find_get_task(pid_t pid)
37404746ed8SIngo Molnar {
37504746ed8SIngo Molnar 	struct task_struct *p;
37604746ed8SIngo Molnar 	guard(rcu)();
37704746ed8SIngo Molnar 
37804746ed8SIngo Molnar 	p = find_process_by_pid(pid);
37904746ed8SIngo Molnar 	if (likely(p))
38004746ed8SIngo Molnar 		get_task_struct(p);
38104746ed8SIngo Molnar 
38204746ed8SIngo Molnar 	return p;
38304746ed8SIngo Molnar }
38404746ed8SIngo Molnar 
38504746ed8SIngo Molnar DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
38604746ed8SIngo Molnar 	     find_get_task(pid), pid_t pid)
38704746ed8SIngo Molnar 
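/*
 * The scoped-cleanup helpers used below come from <linux/cleanup.h>.
 * Roughly (a sketch, not the exact macro expansion), a declaration such as
 * CLASS(find_get_task, p)(pid) behaves like:
 *
 *	struct task_struct *p = find_get_task(pid);
 *	... use p, return from any point ...
 *	on scope exit: if (p) put_task_struct(p);
 *
 * which is why the syscall paths below can return early without an
 * explicit put_task_struct().
 */
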
38804746ed8SIngo Molnar /*
38904746ed8SIngo Molnar  * sched_setparam() passes in -1 for its policy, to let the functions
39004746ed8SIngo Molnar  * it calls know not to change it.
39104746ed8SIngo Molnar  */
39204746ed8SIngo Molnar #define SETPARAM_POLICY	-1
39304746ed8SIngo Molnar 
39404746ed8SIngo Molnar static void __setscheduler_params(struct task_struct *p,
39504746ed8SIngo Molnar 		const struct sched_attr *attr)
39604746ed8SIngo Molnar {
39704746ed8SIngo Molnar 	int policy = attr->sched_policy;
39804746ed8SIngo Molnar 
39904746ed8SIngo Molnar 	if (policy == SETPARAM_POLICY)
40004746ed8SIngo Molnar 		policy = p->policy;
40104746ed8SIngo Molnar 
40204746ed8SIngo Molnar 	p->policy = policy;
40304746ed8SIngo Molnar 
404*857b158dSPeter Zijlstra 	if (dl_policy(policy)) {
40504746ed8SIngo Molnar 		__setparam_dl(p, attr);
406*857b158dSPeter Zijlstra 	} else if (fair_policy(policy)) {
40704746ed8SIngo Molnar 		p->static_prio = NICE_TO_PRIO(attr->sched_nice);
408*857b158dSPeter Zijlstra 		if (attr->sched_runtime) {
409*857b158dSPeter Zijlstra 			p->se.custom_slice = 1;
410*857b158dSPeter Zijlstra 			p->se.slice = clamp_t(u64, attr->sched_runtime,
411*857b158dSPeter Zijlstra 					      NSEC_PER_MSEC/10,   /* HZ=1000 * 10 */
412*857b158dSPeter Zijlstra 					      NSEC_PER_MSEC*100); /* HZ=100  / 10 */
413*857b158dSPeter Zijlstra 		} else {
414*857b158dSPeter Zijlstra 			p->se.custom_slice = 0;
415*857b158dSPeter Zijlstra 			p->se.slice = sysctl_sched_base_slice;
416*857b158dSPeter Zijlstra 		}
417*857b158dSPeter Zijlstra 	}
41804746ed8SIngo Molnar 
41904746ed8SIngo Molnar 	/*
42004746ed8SIngo Molnar 	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
42104746ed8SIngo Molnar 	 * !rt_policy. Always setting this ensures that things like
42204746ed8SIngo Molnar 	 * getparam()/getattr() don't report silly values for !rt tasks.
42304746ed8SIngo Molnar 	 */
42404746ed8SIngo Molnar 	p->rt_priority = attr->sched_priority;
42504746ed8SIngo Molnar 	p->normal_prio = normal_prio(p);
42604746ed8SIngo Molnar 	set_load_weight(p, true);
42704746ed8SIngo Molnar }
42804746ed8SIngo Molnar 
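/*
 * Userspace sketch (illustrative, not part of this file): on kernels that
 * carry the code above, a SCHED_OTHER task may pass sched_runtime to
 * request a custom base slice, clamped to 0.1ms..100ms; passing 0 restores
 * the default slice. Older glibc has no sched_setattr() wrapper, so the
 * struct layout is restated here from the uapi header and the raw syscall
 * is used (recent glibc provides its own wrapper and type, in which case
 * the local definition should be dropped). The helper name is made up:
 *
 *	#include <sched.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	struct sched_attr {
 *		uint32_t size;
 *		uint32_t sched_policy;
 *		uint64_t sched_flags;
 *		int32_t  sched_nice;
 *		uint32_t sched_priority;
 *		uint64_t sched_runtime;
 *		uint64_t sched_deadline;
 *		uint64_t sched_period;
 *		uint32_t sched_util_min;
 *		uint32_t sched_util_max;
 *	};
 *
 *	static int set_custom_slice(uint64_t slice_ns)
 *	{
 *		struct sched_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.size          = sizeof(attr);
 *		attr.sched_policy  = SCHED_OTHER;
 *		attr.sched_runtime = slice_ns;
 *		return syscall(SYS_sched_setattr, 0, &attr, 0);
 *	}
 */
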
42904746ed8SIngo Molnar /*
43004746ed8SIngo Molnar  * Check the target process has a UID that matches the current process's:
43104746ed8SIngo Molnar  */
43204746ed8SIngo Molnar static bool check_same_owner(struct task_struct *p)
43304746ed8SIngo Molnar {
43404746ed8SIngo Molnar 	const struct cred *cred = current_cred(), *pcred;
43504746ed8SIngo Molnar 	guard(rcu)();
43604746ed8SIngo Molnar 
43704746ed8SIngo Molnar 	pcred = __task_cred(p);
43804746ed8SIngo Molnar 	return (uid_eq(cred->euid, pcred->euid) ||
43904746ed8SIngo Molnar 		uid_eq(cred->euid, pcred->uid));
44004746ed8SIngo Molnar }
44104746ed8SIngo Molnar 
44204746ed8SIngo Molnar #ifdef CONFIG_UCLAMP_TASK
44304746ed8SIngo Molnar 
44404746ed8SIngo Molnar static int uclamp_validate(struct task_struct *p,
44504746ed8SIngo Molnar 			   const struct sched_attr *attr)
44604746ed8SIngo Molnar {
44704746ed8SIngo Molnar 	int util_min = p->uclamp_req[UCLAMP_MIN].value;
44804746ed8SIngo Molnar 	int util_max = p->uclamp_req[UCLAMP_MAX].value;
44904746ed8SIngo Molnar 
45004746ed8SIngo Molnar 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
45104746ed8SIngo Molnar 		util_min = attr->sched_util_min;
45204746ed8SIngo Molnar 
45304746ed8SIngo Molnar 		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
45404746ed8SIngo Molnar 			return -EINVAL;
45504746ed8SIngo Molnar 	}
45604746ed8SIngo Molnar 
45704746ed8SIngo Molnar 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
45804746ed8SIngo Molnar 		util_max = attr->sched_util_max;
45904746ed8SIngo Molnar 
46004746ed8SIngo Molnar 		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
46104746ed8SIngo Molnar 			return -EINVAL;
46204746ed8SIngo Molnar 	}
46304746ed8SIngo Molnar 
46404746ed8SIngo Molnar 	if (util_min != -1 && util_max != -1 && util_min > util_max)
46504746ed8SIngo Molnar 		return -EINVAL;
46604746ed8SIngo Molnar 
46704746ed8SIngo Molnar 	/*
46804746ed8SIngo Molnar 	 * We have valid uclamp attributes; make sure uclamp is enabled.
46904746ed8SIngo Molnar 	 *
47004746ed8SIngo Molnar 	 * We need to do that here, because enabling static branches is a
47104746ed8SIngo Molnar 	 * blocking operation which obviously cannot be done while holding
47204746ed8SIngo Molnar 	 * scheduler locks.
47304746ed8SIngo Molnar 	 */
47404746ed8SIngo Molnar 	static_branch_enable(&sched_uclamp_used);
47504746ed8SIngo Molnar 
47604746ed8SIngo Molnar 	return 0;
47704746ed8SIngo Molnar }
47804746ed8SIngo Molnar 
47904746ed8SIngo Molnar static bool uclamp_reset(const struct sched_attr *attr,
48004746ed8SIngo Molnar 			 enum uclamp_id clamp_id,
48104746ed8SIngo Molnar 			 struct uclamp_se *uc_se)
48204746ed8SIngo Molnar {
48304746ed8SIngo Molnar 	/* Reset on sched class change for a non user-defined clamp value. */
48404746ed8SIngo Molnar 	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
48504746ed8SIngo Molnar 	    !uc_se->user_defined)
48604746ed8SIngo Molnar 		return true;
48704746ed8SIngo Molnar 
48804746ed8SIngo Molnar 	/* Reset on sched_util_{min,max} == -1. */
48904746ed8SIngo Molnar 	if (clamp_id == UCLAMP_MIN &&
49004746ed8SIngo Molnar 	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
49104746ed8SIngo Molnar 	    attr->sched_util_min == -1) {
49204746ed8SIngo Molnar 		return true;
49304746ed8SIngo Molnar 	}
49404746ed8SIngo Molnar 
49504746ed8SIngo Molnar 	if (clamp_id == UCLAMP_MAX &&
49604746ed8SIngo Molnar 	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
49704746ed8SIngo Molnar 	    attr->sched_util_max == -1) {
49804746ed8SIngo Molnar 		return true;
49904746ed8SIngo Molnar 	}
50004746ed8SIngo Molnar 
50104746ed8SIngo Molnar 	return false;
50204746ed8SIngo Molnar }
50304746ed8SIngo Molnar 
50404746ed8SIngo Molnar static void __setscheduler_uclamp(struct task_struct *p,
50504746ed8SIngo Molnar 				  const struct sched_attr *attr)
50604746ed8SIngo Molnar {
50704746ed8SIngo Molnar 	enum uclamp_id clamp_id;
50804746ed8SIngo Molnar 
50904746ed8SIngo Molnar 	for_each_clamp_id(clamp_id) {
51004746ed8SIngo Molnar 		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
51104746ed8SIngo Molnar 		unsigned int value;
51204746ed8SIngo Molnar 
51304746ed8SIngo Molnar 		if (!uclamp_reset(attr, clamp_id, uc_se))
51404746ed8SIngo Molnar 			continue;
51504746ed8SIngo Molnar 
51604746ed8SIngo Molnar 		/*
51704746ed8SIngo Molnar 		 * RT by default have a 100% boost value that could be modified
51804746ed8SIngo Molnar 		 * at runtime.
51904746ed8SIngo Molnar 		 */
52004746ed8SIngo Molnar 		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
52104746ed8SIngo Molnar 			value = sysctl_sched_uclamp_util_min_rt_default;
52204746ed8SIngo Molnar 		else
52304746ed8SIngo Molnar 			value = uclamp_none(clamp_id);
52404746ed8SIngo Molnar 
52504746ed8SIngo Molnar 		uclamp_se_set(uc_se, value, false);
52604746ed8SIngo Molnar 
52704746ed8SIngo Molnar 	}
52804746ed8SIngo Molnar 
52904746ed8SIngo Molnar 	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
53004746ed8SIngo Molnar 		return;
53104746ed8SIngo Molnar 
53204746ed8SIngo Molnar 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
53304746ed8SIngo Molnar 	    attr->sched_util_min != -1) {
53404746ed8SIngo Molnar 		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
53504746ed8SIngo Molnar 			      attr->sched_util_min, true);
53604746ed8SIngo Molnar 	}
53704746ed8SIngo Molnar 
53804746ed8SIngo Molnar 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
53904746ed8SIngo Molnar 	    attr->sched_util_max != -1) {
54004746ed8SIngo Molnar 		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
54104746ed8SIngo Molnar 			      attr->sched_util_max, true);
54204746ed8SIngo Molnar 	}
54304746ed8SIngo Molnar }
54404746ed8SIngo Molnar 
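/*
 * Userspace sketch (illustrative): requesting per-task clamps through
 * sched_setattr(2), reusing the struct sched_attr/syscall shim from the
 * earlier sketch. The SCHED_FLAG_* constants are from the uapi
 * <linux/sched.h>; SCHED_FLAG_KEEP_ALL changes only the clamps and leaves
 * policy and parameters alone. Values are on the 0..1024 capacity scale:
 *
 *	attr.size           = sizeof(attr);
 *	attr.sched_flags    = SCHED_FLAG_KEEP_ALL |
 *			      SCHED_FLAG_UTIL_CLAMP_MIN |
 *			      SCHED_FLAG_UTIL_CLAMP_MAX;
 *	attr.sched_util_min = 128;
 *	attr.sched_util_max = 512;
 *	err = syscall(SYS_sched_setattr, pid, &attr, 0);
 */
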
54504746ed8SIngo Molnar #else /* !CONFIG_UCLAMP_TASK: */
54604746ed8SIngo Molnar 
54704746ed8SIngo Molnar static inline int uclamp_validate(struct task_struct *p,
54804746ed8SIngo Molnar 				  const struct sched_attr *attr)
54904746ed8SIngo Molnar {
55004746ed8SIngo Molnar 	return -EOPNOTSUPP;
55104746ed8SIngo Molnar }
55204746ed8SIngo Molnar static void __setscheduler_uclamp(struct task_struct *p,
55304746ed8SIngo Molnar 				  const struct sched_attr *attr) { }
55404746ed8SIngo Molnar #endif
55504746ed8SIngo Molnar 
55604746ed8SIngo Molnar /*
55704746ed8SIngo Molnar  * Allow unprivileged RT tasks to decrease priority.
55804746ed8SIngo Molnar  * Only issue a capable test if needed and only once to avoid an audit
55904746ed8SIngo Molnar  * event on permitted non-privileged operations:
56004746ed8SIngo Molnar  */
56104746ed8SIngo Molnar static int user_check_sched_setscheduler(struct task_struct *p,
56204746ed8SIngo Molnar 					 const struct sched_attr *attr,
56304746ed8SIngo Molnar 					 int policy, int reset_on_fork)
56404746ed8SIngo Molnar {
56504746ed8SIngo Molnar 	if (fair_policy(policy)) {
56604746ed8SIngo Molnar 		if (attr->sched_nice < task_nice(p) &&
56704746ed8SIngo Molnar 		    !is_nice_reduction(p, attr->sched_nice))
56804746ed8SIngo Molnar 			goto req_priv;
56904746ed8SIngo Molnar 	}
57004746ed8SIngo Molnar 
57104746ed8SIngo Molnar 	if (rt_policy(policy)) {
57204746ed8SIngo Molnar 		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
57304746ed8SIngo Molnar 
57404746ed8SIngo Molnar 		/* Can't set/change the rt policy: */
57504746ed8SIngo Molnar 		if (policy != p->policy && !rlim_rtprio)
57604746ed8SIngo Molnar 			goto req_priv;
57704746ed8SIngo Molnar 
57804746ed8SIngo Molnar 		/* Can't increase priority: */
57904746ed8SIngo Molnar 		if (attr->sched_priority > p->rt_priority &&
58004746ed8SIngo Molnar 		    attr->sched_priority > rlim_rtprio)
58104746ed8SIngo Molnar 			goto req_priv;
58204746ed8SIngo Molnar 	}
58304746ed8SIngo Molnar 
58404746ed8SIngo Molnar 	/*
58504746ed8SIngo Molnar 	 * Can't set/change SCHED_DEADLINE policy at all for now
58604746ed8SIngo Molnar 	 * (safest behavior); in the future we would like to allow
58704746ed8SIngo Molnar 	 * unprivileged DL tasks to increase their relative deadline
58804746ed8SIngo Molnar 	 * or reduce their runtime (both ways reducing utilization)
58904746ed8SIngo Molnar 	 */
59004746ed8SIngo Molnar 	if (dl_policy(policy))
59104746ed8SIngo Molnar 		goto req_priv;
59204746ed8SIngo Molnar 
59304746ed8SIngo Molnar 	/*
59404746ed8SIngo Molnar 	 * Treat SCHED_IDLE as nice 20. Only allow a switch to
59504746ed8SIngo Molnar 	 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
59604746ed8SIngo Molnar 	 */
59704746ed8SIngo Molnar 	if (task_has_idle_policy(p) && !idle_policy(policy)) {
59804746ed8SIngo Molnar 		if (!is_nice_reduction(p, task_nice(p)))
59904746ed8SIngo Molnar 			goto req_priv;
60004746ed8SIngo Molnar 	}
60104746ed8SIngo Molnar 
60204746ed8SIngo Molnar 	/* Can't change other user's priorities: */
60304746ed8SIngo Molnar 	if (!check_same_owner(p))
60404746ed8SIngo Molnar 		goto req_priv;
60504746ed8SIngo Molnar 
60604746ed8SIngo Molnar 	/* Normal users shall not reset the sched_reset_on_fork flag: */
60704746ed8SIngo Molnar 	if (p->sched_reset_on_fork && !reset_on_fork)
60804746ed8SIngo Molnar 		goto req_priv;
60904746ed8SIngo Molnar 
61004746ed8SIngo Molnar 	return 0;
61104746ed8SIngo Molnar 
61204746ed8SIngo Molnar req_priv:
61304746ed8SIngo Molnar 	if (!capable(CAP_SYS_NICE))
61404746ed8SIngo Molnar 		return -EPERM;
61504746ed8SIngo Molnar 
61604746ed8SIngo Molnar 	return 0;
61704746ed8SIngo Molnar }
61804746ed8SIngo Molnar 
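/*
 * Userspace sketch (illustrative): per the checks above, a task without
 * CAP_SYS_NICE may still switch to SCHED_FIFO/SCHED_RR up to its
 * RLIMIT_RTPRIO (typically configured by the administrator, e.g. via
 * limits.conf). Uses the glibc sched_setscheduler() wrapper:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	struct sched_param sp = { .sched_priority = 5 };
 *
 *	if (sched_setscheduler(0, SCHED_FIFO, &sp))
 *		perror("sched_setscheduler");
 */
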
61904746ed8SIngo Molnar int __sched_setscheduler(struct task_struct *p,
62004746ed8SIngo Molnar 			 const struct sched_attr *attr,
62104746ed8SIngo Molnar 			 bool user, bool pi)
62204746ed8SIngo Molnar {
62304746ed8SIngo Molnar 	int oldpolicy = -1, policy = attr->sched_policy;
62404746ed8SIngo Molnar 	int retval, oldprio, newprio, queued, running;
62504746ed8SIngo Molnar 	const struct sched_class *prev_class;
62604746ed8SIngo Molnar 	struct balance_callback *head;
62704746ed8SIngo Molnar 	struct rq_flags rf;
62804746ed8SIngo Molnar 	int reset_on_fork;
62904746ed8SIngo Molnar 	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
63004746ed8SIngo Molnar 	struct rq *rq;
63104746ed8SIngo Molnar 	bool cpuset_locked = false;
63204746ed8SIngo Molnar 
63304746ed8SIngo Molnar 	/* The pi code expects interrupts enabled */
63404746ed8SIngo Molnar 	BUG_ON(pi && in_interrupt());
63504746ed8SIngo Molnar recheck:
63604746ed8SIngo Molnar 	/* Double check policy once rq lock held: */
63704746ed8SIngo Molnar 	if (policy < 0) {
63804746ed8SIngo Molnar 		reset_on_fork = p->sched_reset_on_fork;
63904746ed8SIngo Molnar 		policy = oldpolicy = p->policy;
64004746ed8SIngo Molnar 	} else {
64104746ed8SIngo Molnar 		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
64204746ed8SIngo Molnar 
64304746ed8SIngo Molnar 		if (!valid_policy(policy))
64404746ed8SIngo Molnar 			return -EINVAL;
64504746ed8SIngo Molnar 	}
64604746ed8SIngo Molnar 
64704746ed8SIngo Molnar 	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
64804746ed8SIngo Molnar 		return -EINVAL;
64904746ed8SIngo Molnar 
65004746ed8SIngo Molnar 	/*
65104746ed8SIngo Molnar 	 * Valid priorities for SCHED_FIFO and SCHED_RR are
65204746ed8SIngo Molnar 	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
65304746ed8SIngo Molnar 	 * SCHED_BATCH and SCHED_IDLE is 0.
65404746ed8SIngo Molnar 	 */
65504746ed8SIngo Molnar 	if (attr->sched_priority > MAX_RT_PRIO-1)
65604746ed8SIngo Molnar 		return -EINVAL;
65704746ed8SIngo Molnar 	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
65804746ed8SIngo Molnar 	    (rt_policy(policy) != (attr->sched_priority != 0)))
65904746ed8SIngo Molnar 		return -EINVAL;
66004746ed8SIngo Molnar 
66104746ed8SIngo Molnar 	if (user) {
66204746ed8SIngo Molnar 		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
66304746ed8SIngo Molnar 		if (retval)
66404746ed8SIngo Molnar 			return retval;
66504746ed8SIngo Molnar 
66604746ed8SIngo Molnar 		if (attr->sched_flags & SCHED_FLAG_SUGOV)
66704746ed8SIngo Molnar 			return -EINVAL;
66804746ed8SIngo Molnar 
66904746ed8SIngo Molnar 		retval = security_task_setscheduler(p);
67004746ed8SIngo Molnar 		if (retval)
67104746ed8SIngo Molnar 			return retval;
67204746ed8SIngo Molnar 	}
67304746ed8SIngo Molnar 
67404746ed8SIngo Molnar 	/* Update task specific "requested" clamps */
67504746ed8SIngo Molnar 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
67604746ed8SIngo Molnar 		retval = uclamp_validate(p, attr);
67704746ed8SIngo Molnar 		if (retval)
67804746ed8SIngo Molnar 			return retval;
67904746ed8SIngo Molnar 	}
68004746ed8SIngo Molnar 
68104746ed8SIngo Molnar 	/*
68204746ed8SIngo Molnar 	 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
68304746ed8SIngo Molnar 	 * information.
68404746ed8SIngo Molnar 	 */
68504746ed8SIngo Molnar 	if (dl_policy(policy) || dl_policy(p->policy)) {
68604746ed8SIngo Molnar 		cpuset_locked = true;
68704746ed8SIngo Molnar 		cpuset_lock();
68804746ed8SIngo Molnar 	}
68904746ed8SIngo Molnar 
69004746ed8SIngo Molnar 	/*
69104746ed8SIngo Molnar 	 * Make sure no PI-waiters arrive (or leave) while we are
69204746ed8SIngo Molnar 	 * changing the priority of the task:
69304746ed8SIngo Molnar 	 *
69404746ed8SIngo Molnar 	 * To be able to change p->policy safely, the appropriate
69504746ed8SIngo Molnar 	 * runqueue lock must be held.
69604746ed8SIngo Molnar 	 */
69704746ed8SIngo Molnar 	rq = task_rq_lock(p, &rf);
69804746ed8SIngo Molnar 	update_rq_clock(rq);
69904746ed8SIngo Molnar 
70004746ed8SIngo Molnar 	/*
70104746ed8SIngo Molnar 	 * Changing the policy of the stop threads is a very bad idea:
70204746ed8SIngo Molnar 	 */
70304746ed8SIngo Molnar 	if (p == rq->stop) {
70404746ed8SIngo Molnar 		retval = -EINVAL;
70504746ed8SIngo Molnar 		goto unlock;
70604746ed8SIngo Molnar 	}
70704746ed8SIngo Molnar 
70804746ed8SIngo Molnar 	/*
70904746ed8SIngo Molnar 	 * If not changing anything there's no need to proceed further,
71004746ed8SIngo Molnar 	 * but store a possible modification of reset_on_fork.
71104746ed8SIngo Molnar 	 */
71204746ed8SIngo Molnar 	if (unlikely(policy == p->policy)) {
713*857b158dSPeter Zijlstra 		if (fair_policy(policy) &&
714*857b158dSPeter Zijlstra 		    (attr->sched_nice != task_nice(p) ||
715*857b158dSPeter Zijlstra 		     (attr->sched_runtime != p->se.slice)))
71604746ed8SIngo Molnar 			goto change;
71704746ed8SIngo Molnar 		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
71804746ed8SIngo Molnar 			goto change;
71904746ed8SIngo Molnar 		if (dl_policy(policy) && dl_param_changed(p, attr))
72004746ed8SIngo Molnar 			goto change;
72104746ed8SIngo Molnar 		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
72204746ed8SIngo Molnar 			goto change;
72304746ed8SIngo Molnar 
72404746ed8SIngo Molnar 		p->sched_reset_on_fork = reset_on_fork;
72504746ed8SIngo Molnar 		retval = 0;
72604746ed8SIngo Molnar 		goto unlock;
72704746ed8SIngo Molnar 	}
72804746ed8SIngo Molnar change:
72904746ed8SIngo Molnar 
73004746ed8SIngo Molnar 	if (user) {
73104746ed8SIngo Molnar #ifdef CONFIG_RT_GROUP_SCHED
73204746ed8SIngo Molnar 		/*
733402de7fcSIngo Molnar 		 * Do not allow real-time tasks into groups that have no runtime
73404746ed8SIngo Molnar 		 * assigned.
73504746ed8SIngo Molnar 		 */
73604746ed8SIngo Molnar 		if (rt_bandwidth_enabled() && rt_policy(policy) &&
73704746ed8SIngo Molnar 				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
73804746ed8SIngo Molnar 				!task_group_is_autogroup(task_group(p))) {
73904746ed8SIngo Molnar 			retval = -EPERM;
74004746ed8SIngo Molnar 			goto unlock;
74104746ed8SIngo Molnar 		}
74204746ed8SIngo Molnar #endif
74304746ed8SIngo Molnar #ifdef CONFIG_SMP
74404746ed8SIngo Molnar 		if (dl_bandwidth_enabled() && dl_policy(policy) &&
74504746ed8SIngo Molnar 				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
74604746ed8SIngo Molnar 			cpumask_t *span = rq->rd->span;
74704746ed8SIngo Molnar 
74804746ed8SIngo Molnar 			/*
74904746ed8SIngo Molnar 			 * Don't allow tasks with an affinity mask smaller than
75004746ed8SIngo Molnar 			 * the entire root_domain to become SCHED_DEADLINE. We
75104746ed8SIngo Molnar 			 * will also fail if there's no bandwidth available.
75204746ed8SIngo Molnar 			 */
75304746ed8SIngo Molnar 			if (!cpumask_subset(span, p->cpus_ptr) ||
75404746ed8SIngo Molnar 			    rq->rd->dl_bw.bw == 0) {
75504746ed8SIngo Molnar 				retval = -EPERM;
75604746ed8SIngo Molnar 				goto unlock;
75704746ed8SIngo Molnar 			}
75804746ed8SIngo Molnar 		}
75904746ed8SIngo Molnar #endif
76004746ed8SIngo Molnar 	}
76104746ed8SIngo Molnar 
76204746ed8SIngo Molnar 	/* Re-check policy now with rq lock held: */
76304746ed8SIngo Molnar 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
76404746ed8SIngo Molnar 		policy = oldpolicy = -1;
76504746ed8SIngo Molnar 		task_rq_unlock(rq, p, &rf);
76604746ed8SIngo Molnar 		if (cpuset_locked)
76704746ed8SIngo Molnar 			cpuset_unlock();
76804746ed8SIngo Molnar 		goto recheck;
76904746ed8SIngo Molnar 	}
77004746ed8SIngo Molnar 
77104746ed8SIngo Molnar 	/*
77204746ed8SIngo Molnar 	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
77304746ed8SIngo Molnar 	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
77404746ed8SIngo Molnar 	 * is available.
77504746ed8SIngo Molnar 	 */
77604746ed8SIngo Molnar 	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
77704746ed8SIngo Molnar 		retval = -EBUSY;
77804746ed8SIngo Molnar 		goto unlock;
77904746ed8SIngo Molnar 	}
78004746ed8SIngo Molnar 
78104746ed8SIngo Molnar 	p->sched_reset_on_fork = reset_on_fork;
78204746ed8SIngo Molnar 	oldprio = p->prio;
78304746ed8SIngo Molnar 
78404746ed8SIngo Molnar 	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
78504746ed8SIngo Molnar 	if (pi) {
78604746ed8SIngo Molnar 		/*
78704746ed8SIngo Molnar 		 * Take priority boosted tasks into account. If the new
78804746ed8SIngo Molnar 		 * effective priority is unchanged, we just store the new
78904746ed8SIngo Molnar 		 * normal parameters and do not touch the scheduler class and
79004746ed8SIngo Molnar 		 * the runqueue. This will be done when the task deboosts
79104746ed8SIngo Molnar 		 * itself.
79204746ed8SIngo Molnar 		 */
79304746ed8SIngo Molnar 		newprio = rt_effective_prio(p, newprio);
79404746ed8SIngo Molnar 		if (newprio == oldprio)
79504746ed8SIngo Molnar 			queue_flags &= ~DEQUEUE_MOVE;
79604746ed8SIngo Molnar 	}
79704746ed8SIngo Molnar 
79804746ed8SIngo Molnar 	queued = task_on_rq_queued(p);
79904746ed8SIngo Molnar 	running = task_current(rq, p);
80004746ed8SIngo Molnar 	if (queued)
80104746ed8SIngo Molnar 		dequeue_task(rq, p, queue_flags);
80204746ed8SIngo Molnar 	if (running)
80304746ed8SIngo Molnar 		put_prev_task(rq, p);
80404746ed8SIngo Molnar 
80504746ed8SIngo Molnar 	prev_class = p->sched_class;
80604746ed8SIngo Molnar 
80704746ed8SIngo Molnar 	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
80804746ed8SIngo Molnar 		__setscheduler_params(p, attr);
80904746ed8SIngo Molnar 		__setscheduler_prio(p, newprio);
81004746ed8SIngo Molnar 	}
81104746ed8SIngo Molnar 	__setscheduler_uclamp(p, attr);
81204746ed8SIngo Molnar 
81304746ed8SIngo Molnar 	if (queued) {
81404746ed8SIngo Molnar 		/*
81504746ed8SIngo Molnar 		 * We enqueue to tail when the priority of a task is
81604746ed8SIngo Molnar 		 * increased (user space view).
81704746ed8SIngo Molnar 		 */
81804746ed8SIngo Molnar 		if (oldprio < p->prio)
81904746ed8SIngo Molnar 			queue_flags |= ENQUEUE_HEAD;
82004746ed8SIngo Molnar 
82104746ed8SIngo Molnar 		enqueue_task(rq, p, queue_flags);
82204746ed8SIngo Molnar 	}
82304746ed8SIngo Molnar 	if (running)
82404746ed8SIngo Molnar 		set_next_task(rq, p);
82504746ed8SIngo Molnar 
82604746ed8SIngo Molnar 	check_class_changed(rq, p, prev_class, oldprio);
82704746ed8SIngo Molnar 
82804746ed8SIngo Molnar 	/* Avoid rq from going away on us: */
82904746ed8SIngo Molnar 	preempt_disable();
83004746ed8SIngo Molnar 	head = splice_balance_callbacks(rq);
83104746ed8SIngo Molnar 	task_rq_unlock(rq, p, &rf);
83204746ed8SIngo Molnar 
83304746ed8SIngo Molnar 	if (pi) {
83404746ed8SIngo Molnar 		if (cpuset_locked)
83504746ed8SIngo Molnar 			cpuset_unlock();
83604746ed8SIngo Molnar 		rt_mutex_adjust_pi(p);
83704746ed8SIngo Molnar 	}
83804746ed8SIngo Molnar 
83904746ed8SIngo Molnar 	/* Run balance callbacks after we've adjusted the PI chain: */
84004746ed8SIngo Molnar 	balance_callbacks(rq, head);
84104746ed8SIngo Molnar 	preempt_enable();
84204746ed8SIngo Molnar 
84304746ed8SIngo Molnar 	return 0;
84404746ed8SIngo Molnar 
84504746ed8SIngo Molnar unlock:
84604746ed8SIngo Molnar 	task_rq_unlock(rq, p, &rf);
84704746ed8SIngo Molnar 	if (cpuset_locked)
84804746ed8SIngo Molnar 		cpuset_unlock();
84904746ed8SIngo Molnar 	return retval;
85004746ed8SIngo Molnar }
85104746ed8SIngo Molnar 
85204746ed8SIngo Molnar static int _sched_setscheduler(struct task_struct *p, int policy,
85304746ed8SIngo Molnar 			       const struct sched_param *param, bool check)
85404746ed8SIngo Molnar {
85504746ed8SIngo Molnar 	struct sched_attr attr = {
85604746ed8SIngo Molnar 		.sched_policy   = policy,
85704746ed8SIngo Molnar 		.sched_priority = param->sched_priority,
85804746ed8SIngo Molnar 		.sched_nice	= PRIO_TO_NICE(p->static_prio),
85904746ed8SIngo Molnar 	};
86004746ed8SIngo Molnar 
861*857b158dSPeter Zijlstra 	if (p->se.custom_slice)
862*857b158dSPeter Zijlstra 		attr.sched_runtime = p->se.slice;
863*857b158dSPeter Zijlstra 
86404746ed8SIngo Molnar 	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
86504746ed8SIngo Molnar 	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
86604746ed8SIngo Molnar 		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
86704746ed8SIngo Molnar 		policy &= ~SCHED_RESET_ON_FORK;
86804746ed8SIngo Molnar 		attr.sched_policy = policy;
86904746ed8SIngo Molnar 	}
87004746ed8SIngo Molnar 
87104746ed8SIngo Molnar 	return __sched_setscheduler(p, &attr, check, true);
87204746ed8SIngo Molnar }
87304746ed8SIngo Molnar /**
87404746ed8SIngo Molnar  * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
87504746ed8SIngo Molnar  * @p: the task in question.
87604746ed8SIngo Molnar  * @policy: new policy.
87704746ed8SIngo Molnar  * @param: structure containing the new RT priority.
87804746ed8SIngo Molnar  *
87904746ed8SIngo Molnar  * Use sched_set_fifo(), read its comment.
88004746ed8SIngo Molnar  *
88104746ed8SIngo Molnar  * Return: 0 on success. An error code otherwise.
88204746ed8SIngo Molnar  *
88304746ed8SIngo Molnar  * NOTE that the task may already be dead.
88404746ed8SIngo Molnar  */
88504746ed8SIngo Molnar int sched_setscheduler(struct task_struct *p, int policy,
88604746ed8SIngo Molnar 		       const struct sched_param *param)
88704746ed8SIngo Molnar {
88804746ed8SIngo Molnar 	return _sched_setscheduler(p, policy, param, true);
88904746ed8SIngo Molnar }
89004746ed8SIngo Molnar 
89104746ed8SIngo Molnar int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
89204746ed8SIngo Molnar {
89304746ed8SIngo Molnar 	return __sched_setscheduler(p, attr, true, true);
89404746ed8SIngo Molnar }
89504746ed8SIngo Molnar 
89604746ed8SIngo Molnar int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
89704746ed8SIngo Molnar {
89804746ed8SIngo Molnar 	return __sched_setscheduler(p, attr, false, true);
89904746ed8SIngo Molnar }
90004746ed8SIngo Molnar EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
90104746ed8SIngo Molnar 
90204746ed8SIngo Molnar /**
903402de7fcSIngo Molnar  * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernel-space.
90404746ed8SIngo Molnar  * @p: the task in question.
90504746ed8SIngo Molnar  * @policy: new policy.
90604746ed8SIngo Molnar  * @param: structure containing the new RT priority.
90704746ed8SIngo Molnar  *
90804746ed8SIngo Molnar  * Just like sched_setscheduler, only don't bother checking if the
90904746ed8SIngo Molnar  * current context has permission.  For example, this is needed in
91004746ed8SIngo Molnar  * stop_machine(): we create temporary high priority worker threads,
91104746ed8SIngo Molnar  * but our caller might not have that capability.
91204746ed8SIngo Molnar  *
91304746ed8SIngo Molnar  * Return: 0 on success. An error code otherwise.
91404746ed8SIngo Molnar  */
91504746ed8SIngo Molnar int sched_setscheduler_nocheck(struct task_struct *p, int policy,
91604746ed8SIngo Molnar 			       const struct sched_param *param)
91704746ed8SIngo Molnar {
91804746ed8SIngo Molnar 	return _sched_setscheduler(p, policy, param, false);
91904746ed8SIngo Molnar }
92004746ed8SIngo Molnar 
92104746ed8SIngo Molnar /*
92204746ed8SIngo Molnar  * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
92304746ed8SIngo Molnar  * incapable of resource management, which is the one thing an OS really should
92404746ed8SIngo Molnar  * be doing.
92504746ed8SIngo Molnar  *
92604746ed8SIngo Molnar  * This is of course the reason it is limited to privileged users only.
92704746ed8SIngo Molnar  *
92804746ed8SIngo Molnar  * Worse still, it is fundamentally impossible to compose static priority
92904746ed8SIngo Molnar  * workloads. You cannot take two correctly working static prio workloads
93004746ed8SIngo Molnar  * and smash them together and still expect them to work.
93104746ed8SIngo Molnar  *
93204746ed8SIngo Molnar  * For this reason 'all' FIFO tasks the kernel creates are basically at:
93304746ed8SIngo Molnar  *
93404746ed8SIngo Molnar  *   MAX_RT_PRIO / 2
93504746ed8SIngo Molnar  *
93604746ed8SIngo Molnar  * The administrator _MUST_ configure the system, the kernel simply doesn't
93704746ed8SIngo Molnar  * know enough information to make a sensible choice.
93804746ed8SIngo Molnar  */
93904746ed8SIngo Molnar void sched_set_fifo(struct task_struct *p)
94004746ed8SIngo Molnar {
94104746ed8SIngo Molnar 	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
94204746ed8SIngo Molnar 	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
94304746ed8SIngo Molnar }
94404746ed8SIngo Molnar EXPORT_SYMBOL_GPL(sched_set_fifo);
94504746ed8SIngo Molnar 
94604746ed8SIngo Molnar /*
94704746ed8SIngo Molnar  * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
94804746ed8SIngo Molnar  */
94904746ed8SIngo Molnar void sched_set_fifo_low(struct task_struct *p)
95004746ed8SIngo Molnar {
95104746ed8SIngo Molnar 	struct sched_param sp = { .sched_priority = 1 };
95204746ed8SIngo Molnar 	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
95304746ed8SIngo Molnar }
95404746ed8SIngo Molnar EXPORT_SYMBOL_GPL(sched_set_fifo_low);
95504746ed8SIngo Molnar 
95604746ed8SIngo Molnar void sched_set_normal(struct task_struct *p, int nice)
95704746ed8SIngo Molnar {
95804746ed8SIngo Molnar 	struct sched_attr attr = {
95904746ed8SIngo Molnar 		.sched_policy = SCHED_NORMAL,
96004746ed8SIngo Molnar 		.sched_nice = nice,
96104746ed8SIngo Molnar 	};
96204746ed8SIngo Molnar 	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
96304746ed8SIngo Molnar }
96404746ed8SIngo Molnar EXPORT_SYMBOL_GPL(sched_set_normal);
96504746ed8SIngo Molnar 
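/*
 * In-kernel usage sketch (illustrative; the function and work item are
 * made up): a kthread that needs to run above SCHED_NORMAL but has no
 * meaningful static priority of its own would do:
 *
 *	static int my_worker_fn(void *data)
 *	{
 *		sched_set_fifo(current);
 *
 *		while (!kthread_should_stop())
 *			handle_one_item(data);
 *		return 0;
 *	}
 */
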
96604746ed8SIngo Molnar static int
96704746ed8SIngo Molnar do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
96804746ed8SIngo Molnar {
96904746ed8SIngo Molnar 	struct sched_param lparam;
97004746ed8SIngo Molnar 
97104746ed8SIngo Molnar 	if (!param || pid < 0)
97204746ed8SIngo Molnar 		return -EINVAL;
97304746ed8SIngo Molnar 	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
97404746ed8SIngo Molnar 		return -EFAULT;
97504746ed8SIngo Molnar 
97604746ed8SIngo Molnar 	CLASS(find_get_task, p)(pid);
97704746ed8SIngo Molnar 	if (!p)
97804746ed8SIngo Molnar 		return -ESRCH;
97904746ed8SIngo Molnar 
98004746ed8SIngo Molnar 	return sched_setscheduler(p, policy, &lparam);
98104746ed8SIngo Molnar }
98204746ed8SIngo Molnar 
98304746ed8SIngo Molnar /*
98404746ed8SIngo Molnar  * Mimics kernel/events/core.c perf_copy_attr().
98504746ed8SIngo Molnar  */
98604746ed8SIngo Molnar static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
98704746ed8SIngo Molnar {
98804746ed8SIngo Molnar 	u32 size;
98904746ed8SIngo Molnar 	int ret;
99004746ed8SIngo Molnar 
99104746ed8SIngo Molnar 	/* Zero the full structure, so that a short copy will be nice: */
99204746ed8SIngo Molnar 	memset(attr, 0, sizeof(*attr));
99304746ed8SIngo Molnar 
99404746ed8SIngo Molnar 	ret = get_user(size, &uattr->size);
99504746ed8SIngo Molnar 	if (ret)
99604746ed8SIngo Molnar 		return ret;
99704746ed8SIngo Molnar 
99804746ed8SIngo Molnar 	/* ABI compatibility quirk: */
99904746ed8SIngo Molnar 	if (!size)
100004746ed8SIngo Molnar 		size = SCHED_ATTR_SIZE_VER0;
100104746ed8SIngo Molnar 	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
100204746ed8SIngo Molnar 		goto err_size;
100304746ed8SIngo Molnar 
100404746ed8SIngo Molnar 	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
100504746ed8SIngo Molnar 	if (ret) {
100604746ed8SIngo Molnar 		if (ret == -E2BIG)
100704746ed8SIngo Molnar 			goto err_size;
100804746ed8SIngo Molnar 		return ret;
100904746ed8SIngo Molnar 	}
101004746ed8SIngo Molnar 
101104746ed8SIngo Molnar 	if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
101204746ed8SIngo Molnar 	    size < SCHED_ATTR_SIZE_VER1)
101304746ed8SIngo Molnar 		return -EINVAL;
101404746ed8SIngo Molnar 
101504746ed8SIngo Molnar 	/*
101604746ed8SIngo Molnar 	 * XXX: Do we want to be lenient like existing syscalls; or do we want
101704746ed8SIngo Molnar 	 * to be strict and return an error on out-of-bounds values?
101804746ed8SIngo Molnar 	 */
101904746ed8SIngo Molnar 	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
102004746ed8SIngo Molnar 
102104746ed8SIngo Molnar 	return 0;
102204746ed8SIngo Molnar 
102304746ed8SIngo Molnar err_size:
102404746ed8SIngo Molnar 	put_user(sizeof(*attr), &uattr->size);
102504746ed8SIngo Molnar 	return -E2BIG;
102604746ed8SIngo Molnar }
102704746ed8SIngo Molnar 
102804746ed8SIngo Molnar static void get_params(struct task_struct *p, struct sched_attr *attr)
102904746ed8SIngo Molnar {
1030*857b158dSPeter Zijlstra 	if (task_has_dl_policy(p)) {
103104746ed8SIngo Molnar 		__getparam_dl(p, attr);
1032*857b158dSPeter Zijlstra 	} else if (task_has_rt_policy(p)) {
103304746ed8SIngo Molnar 		attr->sched_priority = p->rt_priority;
1034*857b158dSPeter Zijlstra 	} else {
103504746ed8SIngo Molnar 		attr->sched_nice = task_nice(p);
1036*857b158dSPeter Zijlstra 		attr->sched_runtime = p->se.slice;
1037*857b158dSPeter Zijlstra 	}
103804746ed8SIngo Molnar }
103904746ed8SIngo Molnar 
104004746ed8SIngo Molnar /**
104104746ed8SIngo Molnar  * sys_sched_setscheduler - set/change the scheduler policy and RT priority
104204746ed8SIngo Molnar  * @pid: the pid in question.
104304746ed8SIngo Molnar  * @policy: new policy.
104404746ed8SIngo Molnar  * @param: structure containing the new RT priority.
104504746ed8SIngo Molnar  *
104604746ed8SIngo Molnar  * Return: 0 on success. An error code otherwise.
104704746ed8SIngo Molnar  */
104804746ed8SIngo Molnar SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
104904746ed8SIngo Molnar {
105004746ed8SIngo Molnar 	if (policy < 0)
105104746ed8SIngo Molnar 		return -EINVAL;
105204746ed8SIngo Molnar 
105304746ed8SIngo Molnar 	return do_sched_setscheduler(pid, policy, param);
105404746ed8SIngo Molnar }
105504746ed8SIngo Molnar 
105604746ed8SIngo Molnar /**
105704746ed8SIngo Molnar  * sys_sched_setparam - set/change the RT priority of a thread
105804746ed8SIngo Molnar  * @pid: the pid in question.
105904746ed8SIngo Molnar  * @param: structure containing the new RT priority.
106004746ed8SIngo Molnar  *
106104746ed8SIngo Molnar  * Return: 0 on success. An error code otherwise.
106204746ed8SIngo Molnar  */
106304746ed8SIngo Molnar SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
106404746ed8SIngo Molnar {
106504746ed8SIngo Molnar 	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
106604746ed8SIngo Molnar }
106704746ed8SIngo Molnar 
106804746ed8SIngo Molnar /**
106904746ed8SIngo Molnar  * sys_sched_setattr - same as above, but with extended sched_attr
107004746ed8SIngo Molnar  * @pid: the pid in question.
107104746ed8SIngo Molnar  * @uattr: structure containing the extended parameters.
107204746ed8SIngo Molnar  * @flags: for future extension.
107304746ed8SIngo Molnar  */
107404746ed8SIngo Molnar SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
107504746ed8SIngo Molnar 			       unsigned int, flags)
107604746ed8SIngo Molnar {
107704746ed8SIngo Molnar 	struct sched_attr attr;
107804746ed8SIngo Molnar 	int retval;
107904746ed8SIngo Molnar 
108004746ed8SIngo Molnar 	if (!uattr || pid < 0 || flags)
108104746ed8SIngo Molnar 		return -EINVAL;
108204746ed8SIngo Molnar 
108304746ed8SIngo Molnar 	retval = sched_copy_attr(uattr, &attr);
108404746ed8SIngo Molnar 	if (retval)
108504746ed8SIngo Molnar 		return retval;
108604746ed8SIngo Molnar 
108704746ed8SIngo Molnar 	if ((int)attr.sched_policy < 0)
108804746ed8SIngo Molnar 		return -EINVAL;
108904746ed8SIngo Molnar 	if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
109004746ed8SIngo Molnar 		attr.sched_policy = SETPARAM_POLICY;
109104746ed8SIngo Molnar 
109204746ed8SIngo Molnar 	CLASS(find_get_task, p)(pid);
109304746ed8SIngo Molnar 	if (!p)
109404746ed8SIngo Molnar 		return -ESRCH;
109504746ed8SIngo Molnar 
109604746ed8SIngo Molnar 	if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
109704746ed8SIngo Molnar 		get_params(p, &attr);
109804746ed8SIngo Molnar 
109904746ed8SIngo Molnar 	return sched_setattr(p, &attr);
110004746ed8SIngo Molnar }
110104746ed8SIngo Molnar 
110204746ed8SIngo Molnar /**
110304746ed8SIngo Molnar  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
110404746ed8SIngo Molnar  * @pid: the pid in question.
110504746ed8SIngo Molnar  *
110604746ed8SIngo Molnar  * Return: On success, the policy of the thread. Otherwise, a negative error
110704746ed8SIngo Molnar  * code.
110804746ed8SIngo Molnar  */
110904746ed8SIngo Molnar SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
111004746ed8SIngo Molnar {
111104746ed8SIngo Molnar 	struct task_struct *p;
111204746ed8SIngo Molnar 	int retval;
111304746ed8SIngo Molnar 
111404746ed8SIngo Molnar 	if (pid < 0)
111504746ed8SIngo Molnar 		return -EINVAL;
111604746ed8SIngo Molnar 
111704746ed8SIngo Molnar 	guard(rcu)();
111804746ed8SIngo Molnar 	p = find_process_by_pid(pid);
111904746ed8SIngo Molnar 	if (!p)
112004746ed8SIngo Molnar 		return -ESRCH;
112104746ed8SIngo Molnar 
112204746ed8SIngo Molnar 	retval = security_task_getscheduler(p);
112304746ed8SIngo Molnar 	if (!retval) {
112404746ed8SIngo Molnar 		retval = p->policy;
112504746ed8SIngo Molnar 		if (p->sched_reset_on_fork)
112604746ed8SIngo Molnar 			retval |= SCHED_RESET_ON_FORK;
112704746ed8SIngo Molnar 	}
112804746ed8SIngo Molnar 	return retval;
112904746ed8SIngo Molnar }
113004746ed8SIngo Molnar 
113104746ed8SIngo Molnar /**
113204746ed8SIngo Molnar  * sys_sched_getparam - get the RT priority of a thread
113304746ed8SIngo Molnar  * @pid: the pid in question.
113404746ed8SIngo Molnar  * @param: structure containing the RT priority.
113504746ed8SIngo Molnar  *
113604746ed8SIngo Molnar  * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
113704746ed8SIngo Molnar  * code.
113804746ed8SIngo Molnar  */
113904746ed8SIngo Molnar SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
114004746ed8SIngo Molnar {
114104746ed8SIngo Molnar 	struct sched_param lp = { .sched_priority = 0 };
114204746ed8SIngo Molnar 	struct task_struct *p;
114304746ed8SIngo Molnar 	int retval;
114404746ed8SIngo Molnar 
114504746ed8SIngo Molnar 	if (!param || pid < 0)
114604746ed8SIngo Molnar 		return -EINVAL;
114704746ed8SIngo Molnar 
114804746ed8SIngo Molnar 	scoped_guard (rcu) {
114904746ed8SIngo Molnar 		p = find_process_by_pid(pid);
115004746ed8SIngo Molnar 		if (!p)
115104746ed8SIngo Molnar 			return -ESRCH;
115204746ed8SIngo Molnar 
115304746ed8SIngo Molnar 		retval = security_task_getscheduler(p);
115404746ed8SIngo Molnar 		if (retval)
115504746ed8SIngo Molnar 			return retval;
115604746ed8SIngo Molnar 
115704746ed8SIngo Molnar 		if (task_has_rt_policy(p))
115804746ed8SIngo Molnar 			lp.sched_priority = p->rt_priority;
115904746ed8SIngo Molnar 	}
116004746ed8SIngo Molnar 
116104746ed8SIngo Molnar 	/*
116204746ed8SIngo Molnar 	 * This one might sleep, so we cannot do it with a spinlock held ...
116304746ed8SIngo Molnar 	 */
116404746ed8SIngo Molnar 	return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
116504746ed8SIngo Molnar }
116604746ed8SIngo Molnar 
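/*
 * A minimal user-space sketch (not part of the kernel sources) using the
 * glibc sched_getparam() wrapper; sched_priority reads back as 0 unless the
 * thread runs under an RT policy, matching the task_has_rt_policy() check
 * above:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct sched_param sp;
 *
 *		if (sched_getparam(0, &sp))		// 0 == calling thread
 *			perror("sched_getparam");
 *		else
 *			printf("rt_priority = %d\n", sp.sched_priority);
 *		return 0;
 *	}
 */
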
116704746ed8SIngo Molnar /*
116804746ed8SIngo Molnar  * Copy the kernel-space sched_attr structure (which might be larger
116904746ed8SIngo Molnar  * than what user-space knows about) to user-space.
117004746ed8SIngo Molnar  *
117104746ed8SIngo Molnar  * Note that all cases are valid: the user-space buffer can be larger or
117204746ed8SIngo Molnar  * smaller than the kernel-space buffer. The usual case is that both
117304746ed8SIngo Molnar  * have the same size.
117404746ed8SIngo Molnar  */
117504746ed8SIngo Molnar static int
117604746ed8SIngo Molnar sched_attr_copy_to_user(struct sched_attr __user *uattr,
117704746ed8SIngo Molnar 			struct sched_attr *kattr,
117804746ed8SIngo Molnar 			unsigned int usize)
117904746ed8SIngo Molnar {
118004746ed8SIngo Molnar 	unsigned int ksize = sizeof(*kattr);
118104746ed8SIngo Molnar 
118204746ed8SIngo Molnar 	if (!access_ok(uattr, usize))
118304746ed8SIngo Molnar 		return -EFAULT;
118404746ed8SIngo Molnar 
118504746ed8SIngo Molnar 	/*
118604746ed8SIngo Molnar 	 * sched_getattr() ABI forwards and backwards compatibility:
118704746ed8SIngo Molnar 	 *
118804746ed8SIngo Molnar 	 * If usize == ksize then we just copy everything to user-space and all is good.
118904746ed8SIngo Molnar 	 *
119004746ed8SIngo Molnar 	 * If usize < ksize then we only copy as much as user-space has space for;
119104746ed8SIngo Molnar 	 * this keeps ABI compatibility as well. We skip the rest.
119204746ed8SIngo Molnar 	 *
119304746ed8SIngo Molnar 	 * If usize > ksize then user-space is using a newer version of the ABI,
119404746ed8SIngo Molnar 	 * parts of which the kernel doesn't know about. Just ignore them - tooling
119504746ed8SIngo Molnar 	 * can detect which attributes the kernel knows about from the attr->size
119604746ed8SIngo Molnar 	 * value, which is set to ksize in this case.
119704746ed8SIngo Molnar 	 */
119804746ed8SIngo Molnar 	kattr->size = min(usize, ksize);
119904746ed8SIngo Molnar 
120004746ed8SIngo Molnar 	if (copy_to_user(uattr, kattr, kattr->size))
120104746ed8SIngo Molnar 		return -EFAULT;
120204746ed8SIngo Molnar 
120304746ed8SIngo Molnar 	return 0;
120404746ed8SIngo Molnar }
120504746ed8SIngo Molnar 
120604746ed8SIngo Molnar /**
120704746ed8SIngo Molnar  * sys_sched_getattr - similar to sched_getparam, but with sched_attr
120804746ed8SIngo Molnar  * @pid: the pid in question.
120904746ed8SIngo Molnar  * @uattr: structure containing the extended parameters.
121004746ed8SIngo Molnar  * @usize: sizeof(attr) for forward/backward compatibility.
121104746ed8SIngo Molnar  * @flags: for future extension.
 *
 * Return: 0 on success. An error code otherwise.
121204746ed8SIngo Molnar  */
121304746ed8SIngo Molnar SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
121404746ed8SIngo Molnar 		unsigned int, usize, unsigned int, flags)
121504746ed8SIngo Molnar {
121604746ed8SIngo Molnar 	struct sched_attr kattr = { };
121704746ed8SIngo Molnar 	struct task_struct *p;
121804746ed8SIngo Molnar 	int retval;
121904746ed8SIngo Molnar 
122004746ed8SIngo Molnar 	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
122104746ed8SIngo Molnar 	    usize < SCHED_ATTR_SIZE_VER0 || flags)
122204746ed8SIngo Molnar 		return -EINVAL;
122304746ed8SIngo Molnar 
122404746ed8SIngo Molnar 	scoped_guard (rcu) {
122504746ed8SIngo Molnar 		p = find_process_by_pid(pid);
122604746ed8SIngo Molnar 		if (!p)
122704746ed8SIngo Molnar 			return -ESRCH;
122804746ed8SIngo Molnar 
122904746ed8SIngo Molnar 		retval = security_task_getscheduler(p);
123004746ed8SIngo Molnar 		if (retval)
123104746ed8SIngo Molnar 			return retval;
123204746ed8SIngo Molnar 
123304746ed8SIngo Molnar 		kattr.sched_policy = p->policy;
123404746ed8SIngo Molnar 		if (p->sched_reset_on_fork)
123504746ed8SIngo Molnar 			kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
123604746ed8SIngo Molnar 		get_params(p, &kattr);
123704746ed8SIngo Molnar 		kattr.sched_flags &= SCHED_FLAG_ALL;
123804746ed8SIngo Molnar 
123904746ed8SIngo Molnar #ifdef CONFIG_UCLAMP_TASK
124004746ed8SIngo Molnar 		/*
124104746ed8SIngo Molnar 		 * This could race with another potential updater, but this is fine
124204746ed8SIngo Molnar 		 * because it'll correctly read the old or the new value. We don't need
124304746ed8SIngo Molnar 		 * to guarantee who wins the race as long as it doesn't return garbage.
124404746ed8SIngo Molnar 		 */
124504746ed8SIngo Molnar 		kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
124604746ed8SIngo Molnar 		kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
124704746ed8SIngo Molnar #endif
124804746ed8SIngo Molnar 	}
124904746ed8SIngo Molnar 
125004746ed8SIngo Molnar 	return sched_attr_copy_to_user(uattr, &kattr, usize);
125104746ed8SIngo Molnar }
125204746ed8SIngo Molnar 
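/*
 * A minimal user-space sketch (not part of the kernel sources) of the raw
 * sched_getattr() call, using the same local UAPI-layout copy of struct
 * sched_attr as the sched_setattr() sketch above.  Passing sizeof(attr) as
 * usize exercises the size handling in sched_attr_copy_to_user():
 *
 *	#define _GNU_SOURCE
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	struct sched_attr {			// local copy of the UAPI VER0 layout
 *		uint32_t size;
 *		uint32_t sched_policy;
 *		uint64_t sched_flags;
 *		int32_t  sched_nice;
 *		uint32_t sched_priority;
 *		uint64_t sched_runtime;
 *		uint64_t sched_deadline;
 *		uint64_t sched_period;
 *	};
 *
 *	int main(void)
 *	{
 *		struct sched_attr attr;
 *
 *		// args: pid (0 == self), buffer, usize, flags (must be 0)
 *		if (syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0))
 *			perror("sched_getattr");
 *		else
 *			printf("policy=%u, attr.size reported=%u\n",
 *			       attr.sched_policy, attr.size);
 *		return 0;
 *	}
 */
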
125304746ed8SIngo Molnar #ifdef CONFIG_SMP
125404746ed8SIngo Molnar int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
125504746ed8SIngo Molnar {
125604746ed8SIngo Molnar 	/*
125704746ed8SIngo Molnar 	 * If the task isn't a deadline task or admission control is
125804746ed8SIngo Molnar 	 * disabled then we don't care about affinity changes.
125904746ed8SIngo Molnar 	 */
126004746ed8SIngo Molnar 	if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
126104746ed8SIngo Molnar 		return 0;
126204746ed8SIngo Molnar 
126304746ed8SIngo Molnar 	/*
126404746ed8SIngo Molnar 	 * Since bandwidth control happens on a per-root_domain basis,
126504746ed8SIngo Molnar 	 * if the admission test is enabled, we only admit -deadline
126604746ed8SIngo Molnar 	 * tasks that are allowed to run on all the CPUs in the task's
126704746ed8SIngo Molnar 	 * root_domain.
126804746ed8SIngo Molnar 	 */
126904746ed8SIngo Molnar 	guard(rcu)();
127004746ed8SIngo Molnar 	if (!cpumask_subset(task_rq(p)->rd->span, mask))
127104746ed8SIngo Molnar 		return -EBUSY;
127204746ed8SIngo Molnar 
127304746ed8SIngo Molnar 	return 0;
127404746ed8SIngo Molnar }
127504746ed8SIngo Molnar #endif /* CONFIG_SMP */
127604746ed8SIngo Molnar 
127704746ed8SIngo Molnar int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
127804746ed8SIngo Molnar {
127904746ed8SIngo Molnar 	int retval;
128004746ed8SIngo Molnar 	cpumask_var_t cpus_allowed, new_mask;
128104746ed8SIngo Molnar 
128204746ed8SIngo Molnar 	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
128304746ed8SIngo Molnar 		return -ENOMEM;
128404746ed8SIngo Molnar 
128504746ed8SIngo Molnar 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
128604746ed8SIngo Molnar 		retval = -ENOMEM;
128704746ed8SIngo Molnar 		goto out_free_cpus_allowed;
128804746ed8SIngo Molnar 	}
128904746ed8SIngo Molnar 
129004746ed8SIngo Molnar 	cpuset_cpus_allowed(p, cpus_allowed);
129104746ed8SIngo Molnar 	cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
129204746ed8SIngo Molnar 
129304746ed8SIngo Molnar 	ctx->new_mask = new_mask;
129404746ed8SIngo Molnar 	ctx->flags |= SCA_CHECK;
129504746ed8SIngo Molnar 
129604746ed8SIngo Molnar 	retval = dl_task_check_affinity(p, new_mask);
129704746ed8SIngo Molnar 	if (retval)
129804746ed8SIngo Molnar 		goto out_free_new_mask;
129904746ed8SIngo Molnar 
130004746ed8SIngo Molnar 	retval = __set_cpus_allowed_ptr(p, ctx);
130104746ed8SIngo Molnar 	if (retval)
130204746ed8SIngo Molnar 		goto out_free_new_mask;
130304746ed8SIngo Molnar 
130404746ed8SIngo Molnar 	cpuset_cpus_allowed(p, cpus_allowed);
130504746ed8SIngo Molnar 	if (!cpumask_subset(new_mask, cpus_allowed)) {
130604746ed8SIngo Molnar 		/*
130704746ed8SIngo Molnar 		 * We must have raced with a concurrent cpuset update.
130804746ed8SIngo Molnar 		 * Just reset the cpumask to the cpuset's cpus_allowed.
130904746ed8SIngo Molnar 		 */
131004746ed8SIngo Molnar 		cpumask_copy(new_mask, cpus_allowed);
131104746ed8SIngo Molnar 
131204746ed8SIngo Molnar 		/*
131304746ed8SIngo Molnar 		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
131404746ed8SIngo Molnar 		 * will restore the previous user_cpus_ptr value.
131504746ed8SIngo Molnar 		 *
131604746ed8SIngo Molnar 		 * In the unlikely event a previous user_cpus_ptr exists,
131704746ed8SIngo Molnar 		 * we need to further restrict the mask to what is allowed
131804746ed8SIngo Molnar 		 * by that old user_cpus_ptr.
131904746ed8SIngo Molnar 		 */
132004746ed8SIngo Molnar 		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
132104746ed8SIngo Molnar 			bool empty = !cpumask_and(new_mask, new_mask,
132204746ed8SIngo Molnar 						  ctx->user_mask);
132304746ed8SIngo Molnar 
132404746ed8SIngo Molnar 			if (WARN_ON_ONCE(empty))
132504746ed8SIngo Molnar 				cpumask_copy(new_mask, cpus_allowed);
132604746ed8SIngo Molnar 		}
132704746ed8SIngo Molnar 		__set_cpus_allowed_ptr(p, ctx);
132804746ed8SIngo Molnar 		retval = -EINVAL;
132904746ed8SIngo Molnar 	}
133004746ed8SIngo Molnar 
133104746ed8SIngo Molnar out_free_new_mask:
133204746ed8SIngo Molnar 	free_cpumask_var(new_mask);
133304746ed8SIngo Molnar out_free_cpus_allowed:
133404746ed8SIngo Molnar 	free_cpumask_var(cpus_allowed);
133504746ed8SIngo Molnar 	return retval;
133604746ed8SIngo Molnar }
133704746ed8SIngo Molnar 
133804746ed8SIngo Molnar long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
133904746ed8SIngo Molnar {
134004746ed8SIngo Molnar 	struct affinity_context ac;
134104746ed8SIngo Molnar 	struct cpumask *user_mask;
134204746ed8SIngo Molnar 	int retval;
134304746ed8SIngo Molnar 
134404746ed8SIngo Molnar 	CLASS(find_get_task, p)(pid);
134504746ed8SIngo Molnar 	if (!p)
134604746ed8SIngo Molnar 		return -ESRCH;
134704746ed8SIngo Molnar 
134804746ed8SIngo Molnar 	if (p->flags & PF_NO_SETAFFINITY)
134904746ed8SIngo Molnar 		return -EINVAL;
135004746ed8SIngo Molnar 
135104746ed8SIngo Molnar 	if (!check_same_owner(p)) {
135204746ed8SIngo Molnar 		guard(rcu)();
135304746ed8SIngo Molnar 		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
135404746ed8SIngo Molnar 			return -EPERM;
135504746ed8SIngo Molnar 	}
135604746ed8SIngo Molnar 
135704746ed8SIngo Molnar 	retval = security_task_setscheduler(p);
135804746ed8SIngo Molnar 	if (retval)
135904746ed8SIngo Molnar 		return retval;
136004746ed8SIngo Molnar 
136104746ed8SIngo Molnar 	/*
136204746ed8SIngo Molnar 	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
136304746ed8SIngo Molnar 	 * alloc_user_cpus_ptr() returns NULL.
136404746ed8SIngo Molnar 	 */
136504746ed8SIngo Molnar 	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
136604746ed8SIngo Molnar 	if (user_mask) {
136704746ed8SIngo Molnar 		cpumask_copy(user_mask, in_mask);
136804746ed8SIngo Molnar 	} else if (IS_ENABLED(CONFIG_SMP)) {
136904746ed8SIngo Molnar 		return -ENOMEM;
137004746ed8SIngo Molnar 	}
137104746ed8SIngo Molnar 
137204746ed8SIngo Molnar 	ac = (struct affinity_context){
137304746ed8SIngo Molnar 		.new_mask  = in_mask,
137404746ed8SIngo Molnar 		.user_mask = user_mask,
137504746ed8SIngo Molnar 		.flags     = SCA_USER,
137604746ed8SIngo Molnar 	};
137704746ed8SIngo Molnar 
137804746ed8SIngo Molnar 	retval = __sched_setaffinity(p, &ac);
137904746ed8SIngo Molnar 	kfree(ac.user_mask);
138004746ed8SIngo Molnar 
138104746ed8SIngo Molnar 	return retval;
138204746ed8SIngo Molnar }
138304746ed8SIngo Molnar 
138404746ed8SIngo Molnar static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
138504746ed8SIngo Molnar 			     struct cpumask *new_mask)
138604746ed8SIngo Molnar {
138704746ed8SIngo Molnar 	if (len < cpumask_size())
138804746ed8SIngo Molnar 		cpumask_clear(new_mask);
138904746ed8SIngo Molnar 	else if (len > cpumask_size())
139004746ed8SIngo Molnar 		len = cpumask_size();
139104746ed8SIngo Molnar 
139204746ed8SIngo Molnar 	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
139304746ed8SIngo Molnar }
139404746ed8SIngo Molnar 
139504746ed8SIngo Molnar /**
139604746ed8SIngo Molnar  * sys_sched_setaffinity - set the CPU affinity of a process
139704746ed8SIngo Molnar  * @pid: pid of the process
139804746ed8SIngo Molnar  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
139904746ed8SIngo Molnar  * @user_mask_ptr: user-space pointer to the new CPU mask
140004746ed8SIngo Molnar  *
140104746ed8SIngo Molnar  * Return: 0 on success. An error code otherwise.
140204746ed8SIngo Molnar  */
140304746ed8SIngo Molnar SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
140404746ed8SIngo Molnar 		unsigned long __user *, user_mask_ptr)
140504746ed8SIngo Molnar {
140604746ed8SIngo Molnar 	cpumask_var_t new_mask;
140704746ed8SIngo Molnar 	int retval;
140804746ed8SIngo Molnar 
140904746ed8SIngo Molnar 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
141004746ed8SIngo Molnar 		return -ENOMEM;
141104746ed8SIngo Molnar 
141204746ed8SIngo Molnar 	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
141304746ed8SIngo Molnar 	if (retval == 0)
141404746ed8SIngo Molnar 		retval = sched_setaffinity(pid, new_mask);
141504746ed8SIngo Molnar 	free_cpumask_var(new_mask);
141604746ed8SIngo Molnar 	return retval;
141704746ed8SIngo Molnar }
141804746ed8SIngo Molnar 
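/*
 * A minimal user-space sketch (not part of the kernel sources) using the
 * glibc sched_setaffinity() wrapper and the CPU_* macros from <sched.h>
 * (which require _GNU_SOURCE):
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		cpu_set_t set;
 *
 *		CPU_ZERO(&set);
 *		CPU_SET(0, &set);	// restrict the calling thread to CPU 0
 *
 *		if (sched_setaffinity(0, sizeof(set), &set))
 *			perror("sched_setaffinity");
 *		return 0;
 *	}
 *
 * The requested mask is still ANDed with the task's cpuset in
 * __sched_setaffinity() above, so the effective mask can be narrower than
 * what was asked for.
 */
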
141904746ed8SIngo Molnar long sched_getaffinity(pid_t pid, struct cpumask *mask)
142004746ed8SIngo Molnar {
142104746ed8SIngo Molnar 	struct task_struct *p;
142204746ed8SIngo Molnar 	int retval;
142304746ed8SIngo Molnar 
142404746ed8SIngo Molnar 	guard(rcu)();
142504746ed8SIngo Molnar 	p = find_process_by_pid(pid);
142604746ed8SIngo Molnar 	if (!p)
142704746ed8SIngo Molnar 		return -ESRCH;
142804746ed8SIngo Molnar 
142904746ed8SIngo Molnar 	retval = security_task_getscheduler(p);
143004746ed8SIngo Molnar 	if (retval)
143104746ed8SIngo Molnar 		return retval;
143204746ed8SIngo Molnar 
143304746ed8SIngo Molnar 	guard(raw_spinlock_irqsave)(&p->pi_lock);
143404746ed8SIngo Molnar 	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
143504746ed8SIngo Molnar 
143604746ed8SIngo Molnar 	return 0;
143704746ed8SIngo Molnar }
143804746ed8SIngo Molnar 
143904746ed8SIngo Molnar /**
144004746ed8SIngo Molnar  * sys_sched_getaffinity - get the CPU affinity of a process
144104746ed8SIngo Molnar  * @pid: pid of the process
144204746ed8SIngo Molnar  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
144304746ed8SIngo Molnar  * @user_mask_ptr: user-space pointer to hold the current CPU mask
144404746ed8SIngo Molnar  *
144504746ed8SIngo Molnar  * Return: size of CPU mask copied to user_mask_ptr on success. An
144604746ed8SIngo Molnar  * error code otherwise.
144704746ed8SIngo Molnar  */
144804746ed8SIngo Molnar SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
144904746ed8SIngo Molnar 		unsigned long __user *, user_mask_ptr)
145004746ed8SIngo Molnar {
145104746ed8SIngo Molnar 	int ret;
145204746ed8SIngo Molnar 	cpumask_var_t mask;
145304746ed8SIngo Molnar 
145404746ed8SIngo Molnar 	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
145504746ed8SIngo Molnar 		return -EINVAL;
145604746ed8SIngo Molnar 	if (len & (sizeof(unsigned long)-1))
145704746ed8SIngo Molnar 		return -EINVAL;
145804746ed8SIngo Molnar 
145904746ed8SIngo Molnar 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
146004746ed8SIngo Molnar 		return -ENOMEM;
146104746ed8SIngo Molnar 
146204746ed8SIngo Molnar 	ret = sched_getaffinity(pid, mask);
146304746ed8SIngo Molnar 	if (ret == 0) {
146404746ed8SIngo Molnar 		unsigned int retlen = min(len, cpumask_size());
146504746ed8SIngo Molnar 
146604746ed8SIngo Molnar 		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
146704746ed8SIngo Molnar 			ret = -EFAULT;
146804746ed8SIngo Molnar 		else
146904746ed8SIngo Molnar 			ret = retlen;
147004746ed8SIngo Molnar 	}
147104746ed8SIngo Molnar 	free_cpumask_var(mask);
147204746ed8SIngo Molnar 
147304746ed8SIngo Molnar 	return ret;
147404746ed8SIngo Molnar }
147504746ed8SIngo Molnar 
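/*
 * A minimal user-space sketch (not part of the kernel sources) using the
 * glibc sched_getaffinity() wrapper.  The wrapper returns 0 on success,
 * while the raw syscall above returns the number of bytes copied:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		cpu_set_t set;
 *
 *		if (sched_getaffinity(0, sizeof(set), &set))
 *			perror("sched_getaffinity");
 *		else
 *			printf("runnable on %d CPUs\n", CPU_COUNT(&set));
 *		return 0;
 *	}
 */
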
147604746ed8SIngo Molnar static void do_sched_yield(void)
147704746ed8SIngo Molnar {
147804746ed8SIngo Molnar 	struct rq_flags rf;
147904746ed8SIngo Molnar 	struct rq *rq;
148004746ed8SIngo Molnar 
148104746ed8SIngo Molnar 	rq = this_rq_lock_irq(&rf);
148204746ed8SIngo Molnar 
148304746ed8SIngo Molnar 	schedstat_inc(rq->yld_count);
148404746ed8SIngo Molnar 	current->sched_class->yield_task(rq);
148504746ed8SIngo Molnar 
148604746ed8SIngo Molnar 	preempt_disable();
148704746ed8SIngo Molnar 	rq_unlock_irq(rq, &rf);
148804746ed8SIngo Molnar 	sched_preempt_enable_no_resched();
148904746ed8SIngo Molnar 
149004746ed8SIngo Molnar 	schedule();
149104746ed8SIngo Molnar }
149204746ed8SIngo Molnar 
149304746ed8SIngo Molnar /**
149404746ed8SIngo Molnar  * sys_sched_yield - yield the current processor to other threads.
149504746ed8SIngo Molnar  *
149604746ed8SIngo Molnar  * This function yields the current CPU to other tasks. If there are no
149704746ed8SIngo Molnar  * other runnable threads on this CPU, then this function simply returns.
149804746ed8SIngo Molnar  *
149904746ed8SIngo Molnar  * Return: 0.
150004746ed8SIngo Molnar  */
150104746ed8SIngo Molnar SYSCALL_DEFINE0(sched_yield)
150204746ed8SIngo Molnar {
150304746ed8SIngo Molnar 	do_sched_yield();
150404746ed8SIngo Molnar 	return 0;
150504746ed8SIngo Molnar }
150604746ed8SIngo Molnar 
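/*
 * A minimal user-space sketch (not part of the kernel sources): a CPU-bound
 * loop that calls the glibc sched_yield() wrapper between iterations so that
 * other runnable tasks on the same CPU get a chance to run:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		for (int i = 0; i < 3; i++) {
 *			printf("iteration %d\n", i);
 *			sched_yield();
 *		}
 *		return 0;
 *	}
 *
 * As the yield() comment below stresses, do not use yielding as a way of
 * waiting for another task to make progress.
 */
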
150704746ed8SIngo Molnar /**
150804746ed8SIngo Molnar  * yield - yield the current processor to other threads.
150904746ed8SIngo Molnar  *
151004746ed8SIngo Molnar  * Do not ever use this function, there's a 99% chance you're doing it wrong.
151104746ed8SIngo Molnar  *
151204746ed8SIngo Molnar  * The scheduler is at all times free to pick the calling task as the most
151304746ed8SIngo Molnar  * eligible task to run; if removing the yield() call from your code breaks
151404746ed8SIngo Molnar  * it, it's already broken.
151504746ed8SIngo Molnar  *
151604746ed8SIngo Molnar  * Typical broken usage is:
151704746ed8SIngo Molnar  *
151804746ed8SIngo Molnar  * while (!event)
151904746ed8SIngo Molnar  *	yield();
152004746ed8SIngo Molnar  *
152104746ed8SIngo Molnar  * where one assumes that yield() will let 'the other' process run that will
152204746ed8SIngo Molnar  * make event true. If the current task is a SCHED_FIFO task that will never
152304746ed8SIngo Molnar  * happen. Never use yield() as a progress guarantee!!
152404746ed8SIngo Molnar  *
152504746ed8SIngo Molnar  * If you want to use yield() to wait for something, use wait_event().
152604746ed8SIngo Molnar  * If you want to use yield() to be 'nice' for others, use cond_resched().
152704746ed8SIngo Molnar  * If you still want to use yield(), do not!
152804746ed8SIngo Molnar  */
152904746ed8SIngo Molnar void __sched yield(void)
153004746ed8SIngo Molnar {
153104746ed8SIngo Molnar 	set_current_state(TASK_RUNNING);
153204746ed8SIngo Molnar 	do_sched_yield();
153304746ed8SIngo Molnar }
153404746ed8SIngo Molnar EXPORT_SYMBOL(yield);
153504746ed8SIngo Molnar 
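/*
 * A hedged in-kernel sketch of the wait_event() alternative recommended
 * above, replacing a "while (!event) yield();" loop.  "my_wq" and
 * "event_ready" are hypothetical names local to this example:
 *
 *	#include <linux/wait.h>
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool event_ready;
 *
 *	// waiter: sleeps until the condition becomes true
 *	wait_event(my_wq, event_ready);
 *
 *	// producer: make the condition true, then wake the waiter
 *	event_ready = true;
 *	wake_up(&my_wq);
 */
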
153604746ed8SIngo Molnar /**
153704746ed8SIngo Molnar  * yield_to - yield the current processor to another thread in
153804746ed8SIngo Molnar  * your thread group, or accelerate that thread toward the
153904746ed8SIngo Molnar  * processor it's on.
154004746ed8SIngo Molnar  * @p: target task
154104746ed8SIngo Molnar  * @preempt: whether task preemption is allowed or not
154204746ed8SIngo Molnar  *
154304746ed8SIngo Molnar  * It's the caller's job to ensure that the target task struct
154404746ed8SIngo Molnar  * can't go away on us before we can do any checks.
154504746ed8SIngo Molnar  *
154604746ed8SIngo Molnar  * Return:
154704746ed8SIngo Molnar  *	true (>0) if we indeed boosted the target task.
154804746ed8SIngo Molnar  *	false (0) if we failed to boost the target.
154904746ed8SIngo Molnar  *	-ESRCH if there's no task to yield to.
155004746ed8SIngo Molnar  */
155104746ed8SIngo Molnar int __sched yield_to(struct task_struct *p, bool preempt)
155204746ed8SIngo Molnar {
155304746ed8SIngo Molnar 	struct task_struct *curr = current;
155404746ed8SIngo Molnar 	struct rq *rq, *p_rq;
155504746ed8SIngo Molnar 	int yielded = 0;
155604746ed8SIngo Molnar 
155704746ed8SIngo Molnar 	scoped_guard (irqsave) {
155804746ed8SIngo Molnar 		rq = this_rq();
155904746ed8SIngo Molnar 
156004746ed8SIngo Molnar again:
156104746ed8SIngo Molnar 		p_rq = task_rq(p);
156204746ed8SIngo Molnar 		/*
156304746ed8SIngo Molnar 		 * If we're the only runnable task on the rq and the target rq also
156404746ed8SIngo Molnar 		 * has only one task, there's absolutely no point in yielding.
156504746ed8SIngo Molnar 		 */
156604746ed8SIngo Molnar 		if (rq->nr_running == 1 && p_rq->nr_running == 1)
156704746ed8SIngo Molnar 			return -ESRCH;
156804746ed8SIngo Molnar 
156904746ed8SIngo Molnar 		guard(double_rq_lock)(rq, p_rq);
157004746ed8SIngo Molnar 		if (task_rq(p) != p_rq)
157104746ed8SIngo Molnar 			goto again;
157204746ed8SIngo Molnar 
157304746ed8SIngo Molnar 		if (!curr->sched_class->yield_to_task)
157404746ed8SIngo Molnar 			return 0;
157504746ed8SIngo Molnar 
157604746ed8SIngo Molnar 		if (curr->sched_class != p->sched_class)
157704746ed8SIngo Molnar 			return 0;
157804746ed8SIngo Molnar 
157904746ed8SIngo Molnar 		if (task_on_cpu(p_rq, p) || !task_is_running(p))
158004746ed8SIngo Molnar 			return 0;
158104746ed8SIngo Molnar 
158204746ed8SIngo Molnar 		yielded = curr->sched_class->yield_to_task(rq, p);
158304746ed8SIngo Molnar 		if (yielded) {
158404746ed8SIngo Molnar 			schedstat_inc(rq->yld_count);
158504746ed8SIngo Molnar 			/*
158604746ed8SIngo Molnar 			 * Make p's CPU reschedule; pick_next_entity
158704746ed8SIngo Molnar 			 * takes care of fairness.
158804746ed8SIngo Molnar 			 */
158904746ed8SIngo Molnar 			if (preempt && rq != p_rq)
159004746ed8SIngo Molnar 				resched_curr(p_rq);
159104746ed8SIngo Molnar 		}
159204746ed8SIngo Molnar 	}
159304746ed8SIngo Molnar 
159404746ed8SIngo Molnar 	if (yielded)
159504746ed8SIngo Molnar 		schedule();
159604746ed8SIngo Molnar 
159704746ed8SIngo Molnar 	return yielded;
159804746ed8SIngo Molnar }
159904746ed8SIngo Molnar EXPORT_SYMBOL_GPL(yield_to);
160004746ed8SIngo Molnar 
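/*
 * A hedged sketch of a typical directed-yield caller (for example, a
 * virtualization host boosting the vCPU task believed to hold a lock a
 * spinning guest is waiting on).  "target" is a hypothetical task_struct
 * pointer the caller already prevents from going away, as the comment above
 * requires:
 *
 *	if (yield_to(target, false) > 0)
 *		pr_debug("boosted the lock-holding task\n");
 */
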
160104746ed8SIngo Molnar /**
160204746ed8SIngo Molnar  * sys_sched_get_priority_max - return maximum RT priority.
160304746ed8SIngo Molnar  * @policy: scheduling policy.
160404746ed8SIngo Molnar  *
160504746ed8SIngo Molnar  * Return: On success, this syscall returns the maximum
160604746ed8SIngo Molnar  * rt_priority that can be used by a given scheduling class.
160704746ed8SIngo Molnar  * On failure, a negative error code is returned.
160804746ed8SIngo Molnar  */
160904746ed8SIngo Molnar SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
161004746ed8SIngo Molnar {
161104746ed8SIngo Molnar 	int ret = -EINVAL;
161204746ed8SIngo Molnar 
161304746ed8SIngo Molnar 	switch (policy) {
161404746ed8SIngo Molnar 	case SCHED_FIFO:
161504746ed8SIngo Molnar 	case SCHED_RR:
161604746ed8SIngo Molnar 		ret = MAX_RT_PRIO-1;
161704746ed8SIngo Molnar 		break;
161804746ed8SIngo Molnar 	case SCHED_DEADLINE:
161904746ed8SIngo Molnar 	case SCHED_NORMAL:
162004746ed8SIngo Molnar 	case SCHED_BATCH:
162104746ed8SIngo Molnar 	case SCHED_IDLE:
162204746ed8SIngo Molnar 		ret = 0;
162304746ed8SIngo Molnar 		break;
162404746ed8SIngo Molnar 	}
162504746ed8SIngo Molnar 	return ret;
162604746ed8SIngo Molnar }
162704746ed8SIngo Molnar 
162804746ed8SIngo Molnar /**
162904746ed8SIngo Molnar  * sys_sched_get_priority_min - return minimum RT priority.
163004746ed8SIngo Molnar  * @policy: scheduling policy.
163104746ed8SIngo Molnar  *
163204746ed8SIngo Molnar  * Return: On success, this syscall returns the minimum
163304746ed8SIngo Molnar  * rt_priority that can be used by a given scheduling class.
163404746ed8SIngo Molnar  * On failure, a negative error code is returned.
163504746ed8SIngo Molnar  */
163604746ed8SIngo Molnar SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
163704746ed8SIngo Molnar {
163804746ed8SIngo Molnar 	int ret = -EINVAL;
163904746ed8SIngo Molnar 
164004746ed8SIngo Molnar 	switch (policy) {
164104746ed8SIngo Molnar 	case SCHED_FIFO:
164204746ed8SIngo Molnar 	case SCHED_RR:
164304746ed8SIngo Molnar 		ret = 1;
164404746ed8SIngo Molnar 		break;
164504746ed8SIngo Molnar 	case SCHED_DEADLINE:
164604746ed8SIngo Molnar 	case SCHED_NORMAL:
164704746ed8SIngo Molnar 	case SCHED_BATCH:
164804746ed8SIngo Molnar 	case SCHED_IDLE:
164904746ed8SIngo Molnar 		ret = 0;
165004746ed8SIngo Molnar 	}
165104746ed8SIngo Molnar 	return ret;
165204746ed8SIngo Molnar }
165304746ed8SIngo Molnar 
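/*
 * A minimal user-space sketch (not part of the kernel sources) querying the
 * valid static priority range for a couple of policies via the glibc
 * wrappers; per the two syscalls above, the RT range is 1..99 and the
 * non-RT policies report 0..0:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		printf("SCHED_FIFO:  %d..%d\n",
 *		       sched_get_priority_min(SCHED_FIFO),
 *		       sched_get_priority_max(SCHED_FIFO));
 *		printf("SCHED_OTHER: %d..%d\n",
 *		       sched_get_priority_min(SCHED_OTHER),
 *		       sched_get_priority_max(SCHED_OTHER));
 *		return 0;
 *	}
 */
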
165404746ed8SIngo Molnar static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
165504746ed8SIngo Molnar {
165604746ed8SIngo Molnar 	unsigned int time_slice = 0;
165704746ed8SIngo Molnar 	int retval;
165804746ed8SIngo Molnar 
165904746ed8SIngo Molnar 	if (pid < 0)
166004746ed8SIngo Molnar 		return -EINVAL;
166104746ed8SIngo Molnar 
166204746ed8SIngo Molnar 	scoped_guard (rcu) {
166304746ed8SIngo Molnar 		struct task_struct *p = find_process_by_pid(pid);
166404746ed8SIngo Molnar 		if (!p)
166504746ed8SIngo Molnar 			return -ESRCH;
166604746ed8SIngo Molnar 
166704746ed8SIngo Molnar 		retval = security_task_getscheduler(p);
166804746ed8SIngo Molnar 		if (retval)
166904746ed8SIngo Molnar 			return retval;
167004746ed8SIngo Molnar 
167104746ed8SIngo Molnar 		scoped_guard (task_rq_lock, p) {
167204746ed8SIngo Molnar 			struct rq *rq = scope.rq;
167304746ed8SIngo Molnar 			if (p->sched_class->get_rr_interval)
167404746ed8SIngo Molnar 				time_slice = p->sched_class->get_rr_interval(rq, p);
167504746ed8SIngo Molnar 		}
167604746ed8SIngo Molnar 	}
167704746ed8SIngo Molnar 
167804746ed8SIngo Molnar 	jiffies_to_timespec64(time_slice, t);
167904746ed8SIngo Molnar 	return 0;
168004746ed8SIngo Molnar }
168104746ed8SIngo Molnar 
168204746ed8SIngo Molnar /**
1683402de7fcSIngo Molnar  * sys_sched_rr_get_interval - return the default time-slice of a process.
168404746ed8SIngo Molnar  * @pid: pid of the process.
1685402de7fcSIngo Molnar  * @interval: userspace pointer to the time-slice value.
168604746ed8SIngo Molnar  *
1687402de7fcSIngo Molnar  * This syscall writes the default time-slice value of a given process
168804746ed8SIngo Molnar  * into the user-space timespec buffer. A value of '0' means infinity.
168904746ed8SIngo Molnar  *
1690402de7fcSIngo Molnar  * Return: On success, 0 and the time-slice is in @interval. Otherwise,
169104746ed8SIngo Molnar  * an error code.
169204746ed8SIngo Molnar  */
169304746ed8SIngo Molnar SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
169404746ed8SIngo Molnar 		struct __kernel_timespec __user *, interval)
169504746ed8SIngo Molnar {
169604746ed8SIngo Molnar 	struct timespec64 t;
169704746ed8SIngo Molnar 	int retval = sched_rr_get_interval(pid, &t);
169804746ed8SIngo Molnar 
169904746ed8SIngo Molnar 	if (retval == 0)
170004746ed8SIngo Molnar 		retval = put_timespec64(&t, interval);
170104746ed8SIngo Molnar 
170204746ed8SIngo Molnar 	return retval;
170304746ed8SIngo Molnar }
170404746ed8SIngo Molnar 
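/*
 * A minimal user-space sketch (not part of the kernel sources) using the
 * glibc sched_rr_get_interval() wrapper; a reported value of 0 means
 * "infinity", as noted in the comment above:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct timespec ts;
 *
 *		if (sched_rr_get_interval(0, &ts))	// 0 == calling process
 *			perror("sched_rr_get_interval");
 *		else
 *			printf("time-slice: %ld.%09ld s\n",
 *			       (long)ts.tv_sec, (long)ts.tv_nsec);
 *		return 0;
 *	}
 */
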
170504746ed8SIngo Molnar #ifdef CONFIG_COMPAT_32BIT_TIME
170604746ed8SIngo Molnar SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
170704746ed8SIngo Molnar 		struct old_timespec32 __user *, interval)
170804746ed8SIngo Molnar {
170904746ed8SIngo Molnar 	struct timespec64 t;
171004746ed8SIngo Molnar 	int retval = sched_rr_get_interval(pid, &t);
171104746ed8SIngo Molnar 
171204746ed8SIngo Molnar 	if (retval == 0)
171304746ed8SIngo Molnar 		retval = put_old_timespec32(&t, interval);
171404746ed8SIngo Molnar 	return retval;
171504746ed8SIngo Molnar }
171604746ed8SIngo Molnar #endif
1717