1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  kernel/sched/syscalls.c
4  *
5  *  Core kernel scheduler syscalls related code
6  *
7  *  Copyright (C) 1991-2002  Linus Torvalds
8  *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
9  */
10 #include <linux/sched.h>
11 #include <linux/cpuset.h>
12 #include <linux/sched/debug.h>
13 
14 #include <uapi/linux/sched/types.h>
15 
16 #include "sched.h"
17 #include "autogroup.h"
18 
19 static inline int __normal_prio(int policy, int rt_prio, int nice)
20 {
21 	int prio;
22 
23 	if (dl_policy(policy))
24 		prio = MAX_DL_PRIO - 1;
25 	else if (rt_policy(policy))
26 		prio = MAX_RT_PRIO - 1 - rt_prio;
27 	else
28 		prio = NICE_TO_PRIO(nice);
29 
30 	return prio;
31 }
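/*
 * For illustration, with the usual constants (MAX_DL_PRIO == 0,
 * MAX_RT_PRIO == 100, NICE_TO_PRIO(nice) == 120 + nice) this yields,
 * for example:
 *
 *   SCHED_DEADLINE                        ->  -1
 *   SCHED_FIFO/SCHED_RR with rt_prio 50   ->  99 - 50 = 49
 *   SCHED_NORMAL with nice 0              ->  120
 */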
32 
33 /*
34  * Calculate the expected normal priority: i.e. priority
35  * without taking RT-inheritance into account. Might be
36  * boosted by interactivity modifiers. Changes upon fork,
37  * setprio syscalls, and whenever the interactivity
38  * estimator recalculates.
39  */
40 static inline int normal_prio(struct task_struct *p)
41 {
42 	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
43 }
44 
45 /*
46  * Calculate the current priority, i.e. the priority
47  * taken into account by the scheduler. This value might
48  * be boosted by RT tasks, or might be boosted by
49  * interactivity modifiers. Will be RT if the task got
50  * RT-boosted. If not then it returns p->normal_prio.
51  */
52 static int effective_prio(struct task_struct *p)
53 {
54 	p->normal_prio = normal_prio(p);
55 	/*
56 	 * If we are RT tasks or we were boosted to RT priority,
57 	 * keep the priority unchanged. Otherwise, update priority
58 	 * to the normal priority:
59 	 */
60 	if (!rt_or_dl_prio(p->prio))
61 		return p->normal_prio;
62 	return p->prio;
63 }
64 
65 void set_user_nice(struct task_struct *p, long nice)
66 {
67 	bool queued, running;
68 	struct rq *rq;
69 	int old_prio;
70 
71 	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
72 		return;
73 	/*
74 	 * We have to be careful, if called from sys_setpriority(),
75 	 * the task might be in the middle of scheduling on another CPU.
76 	 */
77 	CLASS(task_rq_lock, rq_guard)(p);
78 	rq = rq_guard.rq;
79 
80 	update_rq_clock(rq);
81 
82 	/*
83 	 * The RT priorities are set via sched_setscheduler(), but we still
84 	 * allow the 'normal' nice value to be set - but as expected
85 	 * it won't have any effect on scheduling as long as the task is
86 	 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
87 	 */
88 	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
89 		p->static_prio = NICE_TO_PRIO(nice);
90 		return;
91 	}
92 
93 	queued = task_on_rq_queued(p);
94 	running = task_current_donor(rq, p);
95 	if (queued)
96 		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
97 	if (running)
98 		put_prev_task(rq, p);
99 
100 	p->static_prio = NICE_TO_PRIO(nice);
101 	set_load_weight(p, true);
102 	old_prio = p->prio;
103 	p->prio = effective_prio(p);
104 
105 	if (queued)
106 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
107 	if (running)
108 		set_next_task(rq, p);
109 
110 	/*
111 	 * If the task increased its priority or is running and
112 	 * lowered its priority, then reschedule its CPU:
113 	 */
114 	p->sched_class->prio_changed(rq, p, old_prio);
115 }
116 EXPORT_SYMBOL(set_user_nice);
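/*
 * A typical in-kernel use is a kthread lowering its own priority, e.g.
 * set_user_nice(current, 10); this is only an illustrative usage note.
 */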
117 
118 /*
119  * is_nice_reduction - check if nice value is an actual reduction
120  *
121  * Similar to can_nice() but does not perform a capability check.
122  *
123  * @p: task
124  * @nice: nice value
125  */
126 static bool is_nice_reduction(const struct task_struct *p, const int nice)
127 {
128 	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
129 	int nice_rlim = nice_to_rlimit(nice);
130 
131 	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
132 }
133 
134 /*
135  * can_nice - check if a task can reduce its nice value
136  * @p: task
137  * @nice: nice value
138  */
139 int can_nice(const struct task_struct *p, const int nice)
140 {
141 	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
142 }
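/*
 * Example: nice_to_rlimit() maps nice 19 -> 1, 0 -> 20 and -20 -> 40,
 * so a task whose RLIMIT_NICE is 30 may lower its nice value down to
 * -10 without needing CAP_SYS_NICE.
 */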
143 
144 #ifdef __ARCH_WANT_SYS_NICE
145 
146 /*
147  * sys_nice - change the priority of the current process.
148  * @increment: priority increment
149  *
150  * sys_setpriority is a more generic, but much slower function that
151  * does similar things.
152  */
153 SYSCALL_DEFINE1(nice, int, increment)
154 {
155 	long nice, retval;
156 
157 	/*
158 	 * Setpriority might change our priority at the same moment.
159 	 * We don't have to worry. Conceptually one call occurs first
160 	 * and we have a single winner.
161 	 */
162 	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
163 	nice = task_nice(current) + increment;
164 
165 	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
166 	if (increment < 0 && !can_nice(current, nice))
167 		return -EPERM;
168 
169 	retval = security_task_setnice(current, nice);
170 	if (retval)
171 		return retval;
172 
173 	set_user_nice(current, nice);
174 	return 0;
175 }
176 
177 #endif
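/*
 * Userspace reaches this via the nice(2) wrapper; an illustrative call
 * (errno must be cleared first because -1 is also a valid return value):
 *
 *	errno = 0;
 *	if (nice(5) == -1 && errno != 0)
 *		perror("nice");
 */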
178 
179 /**
180  * task_prio - return the priority value of a given task.
181  * @p: the task in question.
182  *
183  * Return: The priority value as seen by users in /proc.
184  *
185  * sched policy         return value   kernel prio    user prio/nice
186  *
187  * normal, batch, idle     [0 ... 39]  [100 ... 139]          0/[-20 ... 19]
188  * fifo, rr             [-2 ... -100]     [98 ... 0]  [1 ... 99]
189  * deadline                     -101             -1           0
190  */
191 int task_prio(const struct task_struct *p)
192 {
193 	return p->prio - MAX_RT_PRIO;
194 }
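/*
 * Worked examples with MAX_RT_PRIO == 100: a SCHED_NORMAL task at nice 5
 * has p->prio == 125 and reports 25 here; a SCHED_FIFO task with
 * rt_priority 1 has p->prio == 98 and reports -2; a SCHED_DEADLINE task
 * has p->prio == -1 and reports -101, matching the table above.
 */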
195 
196 /**
197  * idle_cpu - is a given CPU idle currently?
198  * @cpu: the processor in question.
199  *
200  * Return: 1 if the CPU is currently idle. 0 otherwise.
201  */
202 int idle_cpu(int cpu)
203 {
204 	struct rq *rq = cpu_rq(cpu);
205 
206 	if (rq->curr != rq->idle)
207 		return 0;
208 
209 	if (rq->nr_running)
210 		return 0;
211 
212 #ifdef CONFIG_SMP
213 	if (rq->ttwu_pending)
214 		return 0;
215 #endif
216 
217 	return 1;
218 }
219 
220 /**
221  * available_idle_cpu - is a given CPU idle for enqueuing work.
222  * @cpu: the CPU in question.
223  *
224  * Return: 1 if the CPU is currently idle. 0 otherwise.
225  */
226 int available_idle_cpu(int cpu)
227 {
228 	if (!idle_cpu(cpu))
229 		return 0;
230 
231 	if (vcpu_is_preempted(cpu))
232 		return 0;
233 
234 	return 1;
235 }
236 
237 /**
238  * idle_task - return the idle task for a given CPU.
239  * @cpu: the processor in question.
240  *
241  * Return: The idle task for the CPU @cpu.
242  */
243 struct task_struct *idle_task(int cpu)
244 {
245 	return cpu_rq(cpu)->idle;
246 }
247 
248 #ifdef CONFIG_SCHED_CORE
249 int sched_core_idle_cpu(int cpu)
250 {
251 	struct rq *rq = cpu_rq(cpu);
252 
253 	if (sched_core_enabled(rq) && rq->curr == rq->idle)
254 		return 1;
255 
256 	return idle_cpu(cpu);
257 }
258 
259 #endif
260 
261 /**
262  * find_process_by_pid - find a process with a matching PID value.
263  * @pid: the pid in question.
264  *
265  * The task of @pid, if found. %NULL otherwise.
266  */
267 static struct task_struct *find_process_by_pid(pid_t pid)
268 {
269 	return pid ? find_task_by_vpid(pid) : current;
270 }
271 
272 static struct task_struct *find_get_task(pid_t pid)
273 {
274 	struct task_struct *p;
275 	guard(rcu)();
276 
277 	p = find_process_by_pid(pid);
278 	if (likely(p))
279 		get_task_struct(p);
280 
281 	return p;
282 }
283 
284 DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
285 	     find_get_task(pid), pid_t pid)
286 
287 /*
288  * sched_setparam() passes in -1 for its policy, to let the functions
289  * it calls know not to change it.
290  */
291 #define SETPARAM_POLICY	-1
292 
293 static void __setscheduler_params(struct task_struct *p,
294 		const struct sched_attr *attr)
295 {
296 	int policy = attr->sched_policy;
297 
298 	if (policy == SETPARAM_POLICY)
299 		policy = p->policy;
300 
301 	p->policy = policy;
302 
303 	if (dl_policy(policy))
304 		__setparam_dl(p, attr);
305 	else if (fair_policy(policy))
306 		__setparam_fair(p, attr);
307 
308 	/* rt-policy tasks do not have a timerslack */
309 	if (rt_or_dl_task_policy(p)) {
310 		p->timer_slack_ns = 0;
311 	} else if (p->timer_slack_ns == 0) {
312 		/* when switching back to non-rt policy, restore timerslack */
313 		p->timer_slack_ns = p->default_timer_slack_ns;
314 	}
315 
316 	/*
317 	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
318 	 * !rt_policy. Always setting this ensures that things like
319 	 * getparam()/getattr() don't report silly values for !rt tasks.
320 	 */
321 	p->rt_priority = attr->sched_priority;
322 	p->normal_prio = normal_prio(p);
323 	set_load_weight(p, true);
324 }
325 
326 /*
327  * Check the target process has a UID that matches the current process's:
328  */
329 static bool check_same_owner(struct task_struct *p)
330 {
331 	const struct cred *cred = current_cred(), *pcred;
332 	guard(rcu)();
333 
334 	pcred = __task_cred(p);
335 	return (uid_eq(cred->euid, pcred->euid) ||
336 		uid_eq(cred->euid, pcred->uid));
337 }
338 
339 #ifdef CONFIG_UCLAMP_TASK
340 
341 static int uclamp_validate(struct task_struct *p,
342 			   const struct sched_attr *attr)
343 {
344 	int util_min = p->uclamp_req[UCLAMP_MIN].value;
345 	int util_max = p->uclamp_req[UCLAMP_MAX].value;
346 
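	/*
	 * The "+ 1" comparisons below accept exactly -1 (reset/leave the
	 * clamp) plus the range [0, SCHED_CAPACITY_SCALE]: with
	 * SCHED_CAPACITY_SCALE == 1024, -1 and 0..1024 pass while 1025 is
	 * rejected.
	 */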
347 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
348 		util_min = attr->sched_util_min;
349 
350 		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
351 			return -EINVAL;
352 	}
353 
354 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
355 		util_max = attr->sched_util_max;
356 
357 		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
358 			return -EINVAL;
359 	}
360 
361 	if (util_min != -1 && util_max != -1 && util_min > util_max)
362 		return -EINVAL;
363 
364 	/*
365 	 * We have valid uclamp attributes; make sure uclamp is enabled.
366 	 *
367 	 * We need to do that here, because enabling static branches is a
368 	 * blocking operation which obviously cannot be done while holding
369 	 * scheduler locks.
370 	 */
371 	static_branch_enable(&sched_uclamp_used);
372 
373 	return 0;
374 }
375 
376 static bool uclamp_reset(const struct sched_attr *attr,
377 			 enum uclamp_id clamp_id,
378 			 struct uclamp_se *uc_se)
379 {
380 	/* Reset on sched class change for a non user-defined clamp value. */
381 	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
382 	    !uc_se->user_defined)
383 		return true;
384 
385 	/* Reset on sched_util_{min,max} == -1. */
386 	if (clamp_id == UCLAMP_MIN &&
387 	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
388 	    attr->sched_util_min == -1) {
389 		return true;
390 	}
391 
392 	if (clamp_id == UCLAMP_MAX &&
393 	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
394 	    attr->sched_util_max == -1) {
395 		return true;
396 	}
397 
398 	return false;
399 }
400 
401 static void __setscheduler_uclamp(struct task_struct *p,
402 				  const struct sched_attr *attr)
403 {
404 	enum uclamp_id clamp_id;
405 
406 	for_each_clamp_id(clamp_id) {
407 		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
408 		unsigned int value;
409 
410 		if (!uclamp_reset(attr, clamp_id, uc_se))
411 			continue;
412 
413 		/*
414 		 * RT tasks by default have a 100% boost value that can be modified
415 		 * at runtime.
416 		 */
417 		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
418 			value = sysctl_sched_uclamp_util_min_rt_default;
419 		else
420 			value = uclamp_none(clamp_id);
421 
422 		uclamp_se_set(uc_se, value, false);
423 
424 	}
425 
426 	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
427 		return;
428 
429 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
430 	    attr->sched_util_min != -1) {
431 		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
432 			      attr->sched_util_min, true);
433 	}
434 
435 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
436 	    attr->sched_util_max != -1) {
437 		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
438 			      attr->sched_util_max, true);
439 	}
440 }
441 
442 #else /* !CONFIG_UCLAMP_TASK: */
443 
444 static inline int uclamp_validate(struct task_struct *p,
445 				  const struct sched_attr *attr)
446 {
447 	return -EOPNOTSUPP;
448 }
449 static void __setscheduler_uclamp(struct task_struct *p,
450 				  const struct sched_attr *attr) { }
451 #endif
452 
453 /*
454  * Allow unprivileged RT tasks to decrease priority.
455  * Only issue a capable test if needed and only once to avoid an audit
456  * event on permitted non-privileged operations:
457  */
458 static int user_check_sched_setscheduler(struct task_struct *p,
459 					 const struct sched_attr *attr,
460 					 int policy, int reset_on_fork)
461 {
462 	if (fair_policy(policy)) {
463 		if (attr->sched_nice < task_nice(p) &&
464 		    !is_nice_reduction(p, attr->sched_nice))
465 			goto req_priv;
466 	}
467 
468 	if (rt_policy(policy)) {
469 		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
470 
471 		/* Can't set/change the rt policy: */
472 		if (policy != p->policy && !rlim_rtprio)
473 			goto req_priv;
474 
475 		/* Can't increase priority: */
476 		if (attr->sched_priority > p->rt_priority &&
477 		    attr->sched_priority > rlim_rtprio)
478 			goto req_priv;
479 	}
480 
481 	/*
482 	 * Can't set/change SCHED_DEADLINE policy at all for now
483 	 * (safest behavior); in the future we would like to allow
484 	 * unprivileged DL tasks to increase their relative deadline
485 	 * or reduce their runtime (both ways reducing utilization)
486 	 */
487 	if (dl_policy(policy))
488 		goto req_priv;
489 
490 	/*
491 	 * Treat SCHED_IDLE as nice 20. Only allow a switch to
492 	 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
493 	 */
494 	if (task_has_idle_policy(p) && !idle_policy(policy)) {
495 		if (!is_nice_reduction(p, task_nice(p)))
496 			goto req_priv;
497 	}
498 
499 	/* Can't change other user's priorities: */
500 	if (!check_same_owner(p))
501 		goto req_priv;
502 
503 	/* Normal users shall not reset the sched_reset_on_fork flag: */
504 	if (p->sched_reset_on_fork && !reset_on_fork)
505 		goto req_priv;
506 
507 	return 0;
508 
509 req_priv:
510 	if (!capable(CAP_SYS_NICE))
511 		return -EPERM;
512 
513 	return 0;
514 }
515 
516 int __sched_setscheduler(struct task_struct *p,
517 			 const struct sched_attr *attr,
518 			 bool user, bool pi)
519 {
520 	int oldpolicy = -1, policy = attr->sched_policy;
521 	int retval, oldprio, newprio, queued, running;
522 	const struct sched_class *prev_class, *next_class;
523 	struct balance_callback *head;
524 	struct rq_flags rf;
525 	int reset_on_fork;
526 	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
527 	struct rq *rq;
528 	bool cpuset_locked = false;
529 
530 	/* The pi code expects interrupts enabled */
531 	BUG_ON(pi && in_interrupt());
532 recheck:
533 	/* Double check policy once rq lock held: */
534 	if (policy < 0) {
535 		reset_on_fork = p->sched_reset_on_fork;
536 		policy = oldpolicy = p->policy;
537 	} else {
538 		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
539 
540 		if (!valid_policy(policy))
541 			return -EINVAL;
542 	}
543 
544 	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
545 		return -EINVAL;
546 
547 	/*
548 	 * Valid priorities for SCHED_FIFO and SCHED_RR are
549 	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
550 	 * SCHED_BATCH and SCHED_IDLE is 0.
551 	 */
552 	if (attr->sched_priority > MAX_RT_PRIO-1)
553 		return -EINVAL;
554 	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
555 	    (rt_policy(policy) != (attr->sched_priority != 0)))
556 		return -EINVAL;
557 
558 	if (user) {
559 		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
560 		if (retval)
561 			return retval;
562 
563 		if (attr->sched_flags & SCHED_FLAG_SUGOV)
564 			return -EINVAL;
565 
566 		retval = security_task_setscheduler(p);
567 		if (retval)
568 			return retval;
569 	}
570 
571 	/* Update task specific "requested" clamps */
572 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
573 		retval = uclamp_validate(p, attr);
574 		if (retval)
575 			return retval;
576 	}
577 
578 	/*
579 	 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
580 	 * information.
581 	 */
582 	if (dl_policy(policy) || dl_policy(p->policy)) {
583 		cpuset_locked = true;
584 		cpuset_lock();
585 	}
586 
587 	/*
588 	 * Make sure no PI-waiters arrive (or leave) while we are
589 	 * changing the priority of the task:
590 	 *
591 	 * To be able to change p->policy safely, the appropriate
592 	 * runqueue lock must be held.
593 	 */
594 	rq = task_rq_lock(p, &rf);
595 	update_rq_clock(rq);
596 
597 	/*
598 	 * Changing the policy of the stop threads is a very bad idea:
599 	 */
600 	if (p == rq->stop) {
601 		retval = -EINVAL;
602 		goto unlock;
603 	}
604 
605 	retval = scx_check_setscheduler(p, policy);
606 	if (retval)
607 		goto unlock;
608 
609 	/*
610 	 * If not changing anything there's no need to proceed further,
611 	 * but store a possible modification of reset_on_fork.
612 	 */
613 	if (unlikely(policy == p->policy)) {
614 		if (fair_policy(policy) &&
615 		    (attr->sched_nice != task_nice(p) ||
616 		     (attr->sched_runtime != p->se.slice)))
617 			goto change;
618 		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
619 			goto change;
620 		if (dl_policy(policy) && dl_param_changed(p, attr))
621 			goto change;
622 		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
623 			goto change;
624 
625 		p->sched_reset_on_fork = reset_on_fork;
626 		retval = 0;
627 		goto unlock;
628 	}
629 change:
630 
631 	if (user) {
632 #ifdef CONFIG_RT_GROUP_SCHED
633 		/*
634 		 * Do not allow real-time tasks into groups that have no runtime
635 		 * assigned.
636 		 */
637 		if (rt_bandwidth_enabled() && rt_policy(policy) &&
638 				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
639 				!task_group_is_autogroup(task_group(p))) {
640 			retval = -EPERM;
641 			goto unlock;
642 		}
643 #endif
644 #ifdef CONFIG_SMP
645 		if (dl_bandwidth_enabled() && dl_policy(policy) &&
646 				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
647 			cpumask_t *span = rq->rd->span;
648 
649 			/*
650 			 * Don't allow tasks with an affinity mask smaller than
651 			 * the entire root_domain to become SCHED_DEADLINE. We
652 			 * will also fail if there's no bandwidth available.
653 			 */
654 			if (!cpumask_subset(span, p->cpus_ptr) ||
655 			    rq->rd->dl_bw.bw == 0) {
656 				retval = -EPERM;
657 				goto unlock;
658 			}
659 		}
660 #endif
661 	}
662 
663 	/* Re-check policy now with rq lock held: */
664 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
665 		policy = oldpolicy = -1;
666 		task_rq_unlock(rq, p, &rf);
667 		if (cpuset_locked)
668 			cpuset_unlock();
669 		goto recheck;
670 	}
671 
672 	/*
673 	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
674 	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
675 	 * is available.
676 	 */
677 	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
678 		retval = -EBUSY;
679 		goto unlock;
680 	}
681 
682 	p->sched_reset_on_fork = reset_on_fork;
683 	oldprio = p->prio;
684 
685 	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
686 	if (pi) {
687 		/*
688 		 * Take priority boosted tasks into account. If the new
689 		 * effective priority is unchanged, we just store the new
690 		 * normal parameters and do not touch the scheduler class and
691 	 * the runqueue. This will be done when the task deboosts
692 		 * itself.
693 		 */
694 		newprio = rt_effective_prio(p, newprio);
695 		if (newprio == oldprio)
696 			queue_flags &= ~DEQUEUE_MOVE;
697 	}
698 
699 	prev_class = p->sched_class;
700 	next_class = __setscheduler_class(policy, newprio);
701 
702 	if (prev_class != next_class && p->se.sched_delayed)
703 		dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);
704 
705 	queued = task_on_rq_queued(p);
706 	running = task_current_donor(rq, p);
707 	if (queued)
708 		dequeue_task(rq, p, queue_flags);
709 	if (running)
710 		put_prev_task(rq, p);
711 
712 	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
713 		__setscheduler_params(p, attr);
714 		p->sched_class = next_class;
715 		p->prio = newprio;
716 	}
717 	__setscheduler_uclamp(p, attr);
718 	check_class_changing(rq, p, prev_class);
719 
720 	if (queued) {
721 		/*
722 		 * We enqueue to tail when the priority of a task is
723 		 * increased (user space view).
724 		 */
725 		if (oldprio < p->prio)
726 			queue_flags |= ENQUEUE_HEAD;
727 
728 		enqueue_task(rq, p, queue_flags);
729 	}
730 	if (running)
731 		set_next_task(rq, p);
732 
733 	check_class_changed(rq, p, prev_class, oldprio);
734 
735 	/* Avoid rq from going away on us: */
736 	preempt_disable();
737 	head = splice_balance_callbacks(rq);
738 	task_rq_unlock(rq, p, &rf);
739 
740 	if (pi) {
741 		if (cpuset_locked)
742 			cpuset_unlock();
743 		rt_mutex_adjust_pi(p);
744 	}
745 
746 	/* Run balance callbacks after we've adjusted the PI chain: */
747 	balance_callbacks(rq, head);
748 	preempt_enable();
749 
750 	return 0;
751 
752 unlock:
753 	task_rq_unlock(rq, p, &rf);
754 	if (cpuset_locked)
755 		cpuset_unlock();
756 	return retval;
757 }
758 
759 static int _sched_setscheduler(struct task_struct *p, int policy,
760 			       const struct sched_param *param, bool check)
761 {
762 	struct sched_attr attr = {
763 		.sched_policy   = policy,
764 		.sched_priority = param->sched_priority,
765 		.sched_nice	= PRIO_TO_NICE(p->static_prio),
766 	};
767 
768 	if (p->se.custom_slice)
769 		attr.sched_runtime = p->se.slice;
770 
771 	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
772 	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
773 		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
774 		policy &= ~SCHED_RESET_ON_FORK;
775 		attr.sched_policy = policy;
776 	}
777 
778 	return __sched_setscheduler(p, &attr, check, true);
779 }
780 /**
781  * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
782  * @p: the task in question.
783  * @policy: new policy.
784  * @param: structure containing the new RT priority.
785  *
786  * Use sched_set_fifo(), read its comment.
787  *
788  * Return: 0 on success. An error code otherwise.
789  *
790  * NOTE that the task may be already dead.
791  */
792 int sched_setscheduler(struct task_struct *p, int policy,
793 		       const struct sched_param *param)
794 {
795 	return _sched_setscheduler(p, policy, param, true);
796 }
797 
798 int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
799 {
800 	return __sched_setscheduler(p, attr, true, true);
801 }
802 
803 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
804 {
805 	return __sched_setscheduler(p, attr, false, true);
806 }
807 EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
808 
809 /**
810  * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernel-space.
811  * @p: the task in question.
812  * @policy: new policy.
813  * @param: structure containing the new RT priority.
814  *
815  * Just like sched_setscheduler, only don't bother checking if the
816  * current context has permission.  For example, this is needed in
817  * stop_machine(): we create temporary high priority worker threads,
818  * but our caller might not have that capability.
819  *
820  * Return: 0 on success. An error code otherwise.
821  */
822 int sched_setscheduler_nocheck(struct task_struct *p, int policy,
823 			       const struct sched_param *param)
824 {
825 	return _sched_setscheduler(p, policy, param, false);
826 }
827 
828 /*
829  * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
830  * incapable of resource management, which is the one thing an OS really should
831  * be doing.
832  *
833  * This is of course the reason it is limited to privileged users only.
834  *
835  * Worse still; it is fundamentally impossible to compose static priority
836  * workloads. You cannot take two correctly working static prio workloads
837  * and smash them together and still expect them to work.
838  *
839  * For this reason 'all' FIFO tasks the kernel creates are basically at:
840  *
841  *   MAX_RT_PRIO / 2
842  *
843  * The administrator _MUST_ configure the system; the kernel simply doesn't
844  * have enough information to make a sensible choice.
845  */
846 void sched_set_fifo(struct task_struct *p)
847 {
848 	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
849 	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
850 }
851 EXPORT_SYMBOL_GPL(sched_set_fifo);
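/*
 * A minimal, illustrative use from a driver that needs an RT worker
 * (my_thread_fn and the thread name are placeholders, not existing
 * kernel symbols):
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_run(my_thread_fn, NULL, "my-rt-worker");
 *	if (!IS_ERR(tsk))
 *		sched_set_fifo(tsk);
 */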
852 
853 /*
854  * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
855  */
856 void sched_set_fifo_low(struct task_struct *p)
857 {
858 	struct sched_param sp = { .sched_priority = 1 };
859 	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
860 }
861 EXPORT_SYMBOL_GPL(sched_set_fifo_low);
862 
863 void sched_set_normal(struct task_struct *p, int nice)
864 {
865 	struct sched_attr attr = {
866 		.sched_policy = SCHED_NORMAL,
867 		.sched_nice = nice,
868 	};
869 	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
870 }
871 EXPORT_SYMBOL_GPL(sched_set_normal);
872 
873 static int
874 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
875 {
876 	struct sched_param lparam;
877 
878 	if (!param || pid < 0)
879 		return -EINVAL;
880 	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
881 		return -EFAULT;
882 
883 	CLASS(find_get_task, p)(pid);
884 	if (!p)
885 		return -ESRCH;
886 
887 	return sched_setscheduler(p, policy, &lparam);
888 }
889 
890 /*
891  * Mimics kernel/events/core.c perf_copy_attr().
892  */
893 static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
894 {
895 	u32 size;
896 	int ret;
897 
898 	/* Zero the full structure, so that a short copy will be nice: */
899 	memset(attr, 0, sizeof(*attr));
900 
901 	ret = get_user(size, &uattr->size);
902 	if (ret)
903 		return ret;
904 
905 	/* ABI compatibility quirk: */
906 	if (!size)
907 		size = SCHED_ATTR_SIZE_VER0;
908 	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
909 		goto err_size;
910 
911 	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
912 	if (ret) {
913 		if (ret == -E2BIG)
914 			goto err_size;
915 		return ret;
916 	}
917 
918 	if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
919 	    size < SCHED_ATTR_SIZE_VER1)
920 		return -EINVAL;
921 
922 	/*
923 	 * XXX: Do we want to be lenient like existing syscalls; or do we want
924 	 * to be strict and return an error on out-of-bounds values?
925 	 */
926 	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
927 
928 	return 0;
929 
930 err_size:
931 	put_user(sizeof(*attr), &uattr->size);
932 	return -E2BIG;
933 }
934 
935 static void get_params(struct task_struct *p, struct sched_attr *attr)
936 {
937 	if (task_has_dl_policy(p)) {
938 		__getparam_dl(p, attr);
939 	} else if (task_has_rt_policy(p)) {
940 		attr->sched_priority = p->rt_priority;
941 	} else {
942 		attr->sched_nice = task_nice(p);
943 		attr->sched_runtime = p->se.slice;
944 	}
945 }
946 
947 /**
948  * sys_sched_setscheduler - set/change the scheduler policy and RT priority
949  * @pid: the pid in question.
950  * @policy: new policy.
951  * @param: structure containing the new RT priority.
952  *
953  * Return: 0 on success. An error code otherwise.
954  */
955 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
956 {
957 	if (policy < 0)
958 		return -EINVAL;
959 
960 	return do_sched_setscheduler(pid, policy, param);
961 }
962 
963 /**
964  * sys_sched_setparam - set/change the RT priority of a thread
965  * @pid: the pid in question.
966  * @param: structure containing the new RT priority.
967  *
968  * Return: 0 on success. An error code otherwise.
969  */
970 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
971 {
972 	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
973 }
974 
975 /**
976  * sys_sched_setattr - same as above, but with extended sched_attr
977  * @pid: the pid in question.
978  * @uattr: structure containing the extended parameters.
979  * @flags: for future extension.
980  */
981 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
982 			       unsigned int, flags)
983 {
984 	struct sched_attr attr;
985 	int retval;
986 
987 	if (!uattr || pid < 0 || flags)
988 		return -EINVAL;
989 
990 	retval = sched_copy_attr(uattr, &attr);
991 	if (retval)
992 		return retval;
993 
994 	if ((int)attr.sched_policy < 0)
995 		return -EINVAL;
996 	if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
997 		attr.sched_policy = SETPARAM_POLICY;
998 
999 	CLASS(find_get_task, p)(pid);
1000 	if (!p)
1001 		return -ESRCH;
1002 
1003 	if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
1004 		get_params(p, &attr);
1005 
1006 	return sched_setattr(p, &attr);
1007 }
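/*
 * Userspace sketch (illustrative only): there is no glibc wrapper for
 * sched_setattr(2), so callers usually go through syscall(2) with a
 * struct sched_attr as defined in the UAPI headers, e.g. making the
 * current thread SCHED_DEADLINE with a 10ms runtime in a 30ms period:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,
 *		.sched_deadline	= 30 * 1000 * 1000,
 *		.sched_period	= 30 * 1000 * 1000,
 *	};
 *
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 */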
1008 
1009 /**
1010  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
1011  * @pid: the pid in question.
1012  *
1013  * Return: On success, the policy of the thread. Otherwise, a negative error
1014  * code.
1015  */
1016 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
1017 {
1018 	struct task_struct *p;
1019 	int retval;
1020 
1021 	if (pid < 0)
1022 		return -EINVAL;
1023 
1024 	guard(rcu)();
1025 	p = find_process_by_pid(pid);
1026 	if (!p)
1027 		return -ESRCH;
1028 
1029 	retval = security_task_getscheduler(p);
1030 	if (!retval) {
1031 		retval = p->policy;
1032 		if (p->sched_reset_on_fork)
1033 			retval |= SCHED_RESET_ON_FORK;
1034 	}
1035 	return retval;
1036 }
1037 
1038 /**
1039  * sys_sched_getparam - get the RT priority of a thread
1040  * @pid: the pid in question.
1041  * @param: structure containing the RT priority.
1042  *
1043  * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
1044  * code.
1045  */
1046 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
1047 {
1048 	struct sched_param lp = { .sched_priority = 0 };
1049 	struct task_struct *p;
1050 	int retval;
1051 
1052 	if (!param || pid < 0)
1053 		return -EINVAL;
1054 
1055 	scoped_guard (rcu) {
1056 		p = find_process_by_pid(pid);
1057 		if (!p)
1058 			return -ESRCH;
1059 
1060 		retval = security_task_getscheduler(p);
1061 		if (retval)
1062 			return retval;
1063 
1064 		if (task_has_rt_policy(p))
1065 			lp.sched_priority = p->rt_priority;
1066 	}
1067 
1068 	/*
1069 	 * This one might sleep, we cannot do it with a spinlock held ...
1070 	 */
1071 	return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
1072 }
1073 
1074 /**
1075  * sys_sched_getattr - similar to sched_getparam, but with sched_attr
1076  * @pid: the pid in question.
1077  * @uattr: structure containing the extended parameters.
1078  * @usize: sizeof(attr) for fwd/bwd comp.
1079  * @flags: for future extension.
1080  */
1081 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
1082 		unsigned int, usize, unsigned int, flags)
1083 {
1084 	struct sched_attr kattr = { };
1085 	struct task_struct *p;
1086 	int retval;
1087 
1088 	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
1089 	    usize < SCHED_ATTR_SIZE_VER0 || flags)
1090 		return -EINVAL;
1091 
1092 	scoped_guard (rcu) {
1093 		p = find_process_by_pid(pid);
1094 		if (!p)
1095 			return -ESRCH;
1096 
1097 		retval = security_task_getscheduler(p);
1098 		if (retval)
1099 			return retval;
1100 
1101 		kattr.sched_policy = p->policy;
1102 		if (p->sched_reset_on_fork)
1103 			kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
1104 		get_params(p, &kattr);
1105 		kattr.sched_flags &= SCHED_FLAG_ALL;
1106 
1107 #ifdef CONFIG_UCLAMP_TASK
1108 		/*
1109 		 * This could race with another potential updater, but this is fine
1110 		 * because it'll correctly read the old or the new value. We don't need
1111 		 * to guarantee who wins the race as long as it doesn't return garbage.
1112 		 */
1113 		kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
1114 		kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
1115 #endif
1116 	}
1117 
1118 	kattr.size = min(usize, sizeof(kattr));
1119 	return copy_struct_to_user(uattr, usize, &kattr, sizeof(kattr), NULL);
1120 }
1121 
1122 #ifdef CONFIG_SMP
1123 int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
1124 {
1125 	/*
1126 	 * If the task isn't a deadline task or admission control is
1127 	 * disabled then we don't care about affinity changes.
1128 	 */
1129 	if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
1130 		return 0;
1131 
1132 	/*
1133 	 * The special/sugov task isn't part of regular bandwidth/admission
1134 	 * control so let userspace change affinities.
1135 	 */
1136 	if (dl_entity_is_special(&p->dl))
1137 		return 0;
1138 
1139 	/*
1140 	 * Since bandwidth control happens on root_domain basis,
1141 	 * if admission test is enabled, we only admit -deadline
1142 	 * tasks allowed to run on all the CPUs in the task's
1143 	 * root_domain.
1144 	 */
1145 	guard(rcu)();
1146 	if (!cpumask_subset(task_rq(p)->rd->span, mask))
1147 		return -EBUSY;
1148 
1149 	return 0;
1150 }
1151 #endif /* CONFIG_SMP */
1152 
1153 int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
1154 {
1155 	int retval;
1156 	cpumask_var_t cpus_allowed, new_mask;
1157 
1158 	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
1159 		return -ENOMEM;
1160 
1161 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
1162 		retval = -ENOMEM;
1163 		goto out_free_cpus_allowed;
1164 	}
1165 
1166 	cpuset_cpus_allowed(p, cpus_allowed);
1167 	cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
1168 
1169 	ctx->new_mask = new_mask;
1170 	ctx->flags |= SCA_CHECK;
1171 
1172 	retval = dl_task_check_affinity(p, new_mask);
1173 	if (retval)
1174 		goto out_free_new_mask;
1175 
1176 	retval = __set_cpus_allowed_ptr(p, ctx);
1177 	if (retval)
1178 		goto out_free_new_mask;
1179 
1180 	cpuset_cpus_allowed(p, cpus_allowed);
1181 	if (!cpumask_subset(new_mask, cpus_allowed)) {
1182 		/*
1183 		 * We must have raced with a concurrent cpuset update.
1184 		 * Just reset the cpumask to the cpuset's cpus_allowed.
1185 		 */
1186 		cpumask_copy(new_mask, cpus_allowed);
1187 
1188 		/*
1189 		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
1190 		 * will restore the previous user_cpus_ptr value.
1191 		 *
1192 		 * In the unlikely event a previous user_cpus_ptr exists,
1193 		 * we need to further restrict the mask to what is allowed
1194 		 * by that old user_cpus_ptr.
1195 		 */
1196 		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
1197 			bool empty = !cpumask_and(new_mask, new_mask,
1198 						  ctx->user_mask);
1199 
1200 			if (empty)
1201 				cpumask_copy(new_mask, cpus_allowed);
1202 		}
1203 		__set_cpus_allowed_ptr(p, ctx);
1204 		retval = -EINVAL;
1205 	}
1206 
1207 out_free_new_mask:
1208 	free_cpumask_var(new_mask);
1209 out_free_cpus_allowed:
1210 	free_cpumask_var(cpus_allowed);
1211 	return retval;
1212 }
1213 
1214 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
1215 {
1216 	struct affinity_context ac;
1217 	struct cpumask *user_mask;
1218 	int retval;
1219 
1220 	CLASS(find_get_task, p)(pid);
1221 	if (!p)
1222 		return -ESRCH;
1223 
1224 	if (p->flags & PF_NO_SETAFFINITY)
1225 		return -EINVAL;
1226 
1227 	if (!check_same_owner(p)) {
1228 		guard(rcu)();
1229 		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
1230 			return -EPERM;
1231 	}
1232 
1233 	retval = security_task_setscheduler(p);
1234 	if (retval)
1235 		return retval;
1236 
1237 	/*
1238 	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
1239 	 * alloc_user_cpus_ptr() returns NULL.
1240 	 */
1241 	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
1242 	if (user_mask) {
1243 		cpumask_copy(user_mask, in_mask);
1244 	} else if (IS_ENABLED(CONFIG_SMP)) {
1245 		return -ENOMEM;
1246 	}
1247 
1248 	ac = (struct affinity_context){
1249 		.new_mask  = in_mask,
1250 		.user_mask = user_mask,
1251 		.flags     = SCA_USER,
1252 	};
1253 
1254 	retval = __sched_setaffinity(p, &ac);
1255 	kfree(ac.user_mask);
1256 
1257 	return retval;
1258 }
1259 
1260 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
1261 			     struct cpumask *new_mask)
1262 {
1263 	if (len < cpumask_size())
1264 		cpumask_clear(new_mask);
1265 	else if (len > cpumask_size())
1266 		len = cpumask_size();
1267 
1268 	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
1269 }
1270 
1271 /**
1272  * sys_sched_setaffinity - set the CPU affinity of a process
1273  * @pid: pid of the process
1274  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
1275  * @user_mask_ptr: user-space pointer to the new CPU mask
1276  *
1277  * Return: 0 on success. An error code otherwise.
1278  */
1279 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
1280 		unsigned long __user *, user_mask_ptr)
1281 {
1282 	cpumask_var_t new_mask;
1283 	int retval;
1284 
1285 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
1286 		return -ENOMEM;
1287 
1288 	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
1289 	if (retval == 0)
1290 		retval = sched_setaffinity(pid, new_mask);
1291 	free_cpumask_var(new_mask);
1292 	return retval;
1293 }
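/*
 * Userspace sketch (illustrative only), pinning the calling thread to
 * CPU 2 via the glibc wrapper:
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(2, &set);
 *	if (sched_setaffinity(0, sizeof(set), &set))
 *		perror("sched_setaffinity");
 */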
1294 
1295 long sched_getaffinity(pid_t pid, struct cpumask *mask)
1296 {
1297 	struct task_struct *p;
1298 	int retval;
1299 
1300 	guard(rcu)();
1301 	p = find_process_by_pid(pid);
1302 	if (!p)
1303 		return -ESRCH;
1304 
1305 	retval = security_task_getscheduler(p);
1306 	if (retval)
1307 		return retval;
1308 
1309 	guard(raw_spinlock_irqsave)(&p->pi_lock);
1310 	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
1311 
1312 	return 0;
1313 }
1314 
1315 /**
1316  * sys_sched_getaffinity - get the CPU affinity of a process
1317  * @pid: pid of the process
1318  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
1319  * @user_mask_ptr: user-space pointer to hold the current CPU mask
1320  *
1321  * Return: size of CPU mask copied to user_mask_ptr on success. An
1322  * error code otherwise.
1323  */
1324 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
1325 		unsigned long __user *, user_mask_ptr)
1326 {
1327 	int ret;
1328 	cpumask_var_t mask;
1329 
1330 	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
1331 		return -EINVAL;
1332 	if (len & (sizeof(unsigned long)-1))
1333 		return -EINVAL;
1334 
1335 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
1336 		return -ENOMEM;
1337 
1338 	ret = sched_getaffinity(pid, mask);
1339 	if (ret == 0) {
1340 		unsigned int retlen = min(len, cpumask_size());
1341 
1342 		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
1343 			ret = -EFAULT;
1344 		else
1345 			ret = retlen;
1346 	}
1347 	free_cpumask_var(mask);
1348 
1349 	return ret;
1350 }
1351 
1352 static void do_sched_yield(void)
1353 {
1354 	struct rq_flags rf;
1355 	struct rq *rq;
1356 
1357 	rq = this_rq_lock_irq(&rf);
1358 
1359 	schedstat_inc(rq->yld_count);
1360 	current->sched_class->yield_task(rq);
1361 
1362 	preempt_disable();
1363 	rq_unlock_irq(rq, &rf);
1364 	sched_preempt_enable_no_resched();
1365 
1366 	schedule();
1367 }
1368 
1369 /**
1370  * sys_sched_yield - yield the current processor to other threads.
1371  *
1372  * This function yields the current CPU to other tasks. If there are no
1373  * other threads running on this CPU then this function will return.
1374  *
1375  * Return: 0.
1376  */
1377 SYSCALL_DEFINE0(sched_yield)
1378 {
1379 	do_sched_yield();
1380 	return 0;
1381 }
1382 
1383 /**
1384  * yield - yield the current processor to other threads.
1385  *
1386  * Do not ever use this function, there's a 99% chance you're doing it wrong.
1387  *
1388  * The scheduler is at all times free to pick the calling task as the most
1389  * eligible task to run, if removing the yield() call from your code breaks
1390  * it, it's already broken.
1391  *
1392  * Typical broken usage is:
1393  *
1394  * while (!event)
1395  *	yield();
1396  *
1397  * where one assumes that yield() will let 'the other' process run that will
1398  * make event true. If the current task is a SCHED_FIFO task that will never
1399  * happen. Never use yield() as a progress guarantee!!
1400  *
1401  * If you want to use yield() to wait for something, use wait_event().
1402  * If you want to use yield() to be 'nice' for others, use cond_resched().
1403  * If you still want to use yield(), do not!
1404  */
1405 void __sched yield(void)
1406 {
1407 	set_current_state(TASK_RUNNING);
1408 	do_sched_yield();
1409 }
1410 EXPORT_SYMBOL(yield);
1411 
1412 /**
1413  * yield_to - yield the current processor to another thread in
1414  * your thread group, or accelerate that thread toward the
1415  * processor it's on.
1416  * @p: target task
1417  * @preempt: whether task preemption is allowed or not
1418  *
1419  * It's the caller's job to ensure that the target task struct
1420  * can't go away on us before we can do any checks.
1421  *
1422  * Return:
1423  *	true (>0) if we indeed boosted the target task.
1424  *	false (0) if we failed to boost the target.
1425  *	-ESRCH if there's no task to yield to.
1426  */
1427 int __sched yield_to(struct task_struct *p, bool preempt)
1428 {
1429 	struct task_struct *curr = current;
1430 	struct rq *rq, *p_rq;
1431 	int yielded = 0;
1432 
1433 	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
1434 		rq = this_rq();
1435 
1436 again:
1437 		p_rq = task_rq(p);
1438 		/*
1439 		 * If we're the only runnable task on the rq and target rq also
1440 		 * has only one task, there's absolutely no point in yielding.
1441 		 */
1442 		if (rq->nr_running == 1 && p_rq->nr_running == 1)
1443 			return -ESRCH;
1444 
1445 		guard(double_rq_lock)(rq, p_rq);
1446 		if (task_rq(p) != p_rq)
1447 			goto again;
1448 
1449 		if (!curr->sched_class->yield_to_task)
1450 			return 0;
1451 
1452 		if (curr->sched_class != p->sched_class)
1453 			return 0;
1454 
1455 		if (task_on_cpu(p_rq, p) || !task_is_running(p))
1456 			return 0;
1457 
1458 		yielded = curr->sched_class->yield_to_task(rq, p);
1459 		if (yielded) {
1460 			schedstat_inc(rq->yld_count);
1461 			/*
1462 			 * Make p's CPU reschedule; pick_next_entity
1463 			 * takes care of fairness.
1464 			 */
1465 			if (preempt && rq != p_rq)
1466 				resched_curr(p_rq);
1467 		}
1468 	}
1469 
1470 	if (yielded)
1471 		schedule();
1472 
1473 	return yielded;
1474 }
1475 EXPORT_SYMBOL_GPL(yield_to);
1476 
1477 /**
1478  * sys_sched_get_priority_max - return maximum RT priority.
1479  * @policy: scheduling class.
1480  *
1481  * Return: On success, this syscall returns the maximum
1482  * rt_priority that can be used by a given scheduling class.
1483  * On failure, a negative error code is returned.
1484  */
1485 SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
1486 {
1487 	int ret = -EINVAL;
1488 
1489 	switch (policy) {
1490 	case SCHED_FIFO:
1491 	case SCHED_RR:
1492 		ret = MAX_RT_PRIO-1;
1493 		break;
1494 	case SCHED_DEADLINE:
1495 	case SCHED_NORMAL:
1496 	case SCHED_BATCH:
1497 	case SCHED_IDLE:
1498 	case SCHED_EXT:
1499 		ret = 0;
1500 		break;
1501 	}
1502 	return ret;
1503 }
1504 
1505 /**
1506  * sys_sched_get_priority_min - return minimum RT priority.
1507  * @policy: scheduling class.
1508  *
1509  * Return: On success, this syscall returns the minimum
1510  * rt_priority that can be used by a given scheduling class.
1511  * On failure, a negative error code is returned.
1512  */
1513 SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
1514 {
1515 	int ret = -EINVAL;
1516 
1517 	switch (policy) {
1518 	case SCHED_FIFO:
1519 	case SCHED_RR:
1520 		ret = 1;
1521 		break;
1522 	case SCHED_DEADLINE:
1523 	case SCHED_NORMAL:
1524 	case SCHED_BATCH:
1525 	case SCHED_IDLE:
1526 	case SCHED_EXT:
1527 		ret = 0;
1528 	}
1529 	return ret;
1530 }
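/*
 * With MAX_RT_PRIO == 100, sched_get_priority_max(SCHED_FIFO) therefore
 * returns 99 and sched_get_priority_min(SCHED_FIFO) returns 1, while
 * both return 0 for the non-RT policies.
 */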
1531 
1532 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
1533 {
1534 	unsigned int time_slice = 0;
1535 	int retval;
1536 
1537 	if (pid < 0)
1538 		return -EINVAL;
1539 
1540 	scoped_guard (rcu) {
1541 		struct task_struct *p = find_process_by_pid(pid);
1542 		if (!p)
1543 			return -ESRCH;
1544 
1545 		retval = security_task_getscheduler(p);
1546 		if (retval)
1547 			return retval;
1548 
1549 		scoped_guard (task_rq_lock, p) {
1550 			struct rq *rq = scope.rq;
1551 			if (p->sched_class->get_rr_interval)
1552 				time_slice = p->sched_class->get_rr_interval(rq, p);
1553 		}
1554 	}
1555 
1556 	jiffies_to_timespec64(time_slice, t);
1557 	return 0;
1558 }
1559 
1560 /**
1561  * sys_sched_rr_get_interval - return the default time-slice of a process.
1562  * @pid: pid of the process.
1563  * @interval: userspace pointer to the time-slice value.
1564  *
1565  * this syscall writes the default time-slice value of a given process
1566  * into the user-space timespec buffer. A value of '0' means infinity.
1567  *
1568  * Return: On success, 0 and the time-slice is in @interval. Otherwise,
1569  * an error code.
1570  */
1571 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
1572 		struct __kernel_timespec __user *, interval)
1573 {
1574 	struct timespec64 t;
1575 	int retval = sched_rr_get_interval(pid, &t);
1576 
1577 	if (retval == 0)
1578 		retval = put_timespec64(&t, interval);
1579 
1580 	return retval;
1581 }
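/*
 * Userspace sketch (illustrative only), querying the caller's own RR
 * timeslice through the glibc wrapper:
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
 */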
1582 
1583 #ifdef CONFIG_COMPAT_32BIT_TIME
1584 SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
1585 		struct old_timespec32 __user *, interval)
1586 {
1587 	struct timespec64 t;
1588 	int retval = sched_rr_get_interval(pid, &t);
1589 
1590 	if (retval == 0)
1591 		retval = put_old_timespec32(&t, interval);
1592 	return retval;
1593 }
1594 #endif
1595