xref: /linux/kernel/sched/syscalls.c (revision 96fd6c65efc652e9054163e6d3cf254b9e5b93d2)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  kernel/sched/syscalls.c
4  *
5  *  Core kernel scheduler syscall-related code
6  *
7  *  Copyright (C) 1991-2002  Linus Torvalds
8  *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
9  */
10 #include <linux/sched.h>
11 #include <linux/cpuset.h>
12 #include <linux/sched/debug.h>
13 
14 #include <uapi/linux/sched/types.h>
15 
16 #include "sched.h"
17 #include "autogroup.h"
18 
19 static inline int __normal_prio(int policy, int rt_prio, int nice)
20 {
21 	int prio;
22 
23 	if (dl_policy(policy))
24 		prio = MAX_DL_PRIO - 1;
25 	else if (rt_policy(policy))
26 		prio = MAX_RT_PRIO - 1 - rt_prio;
27 	else
28 		prio = NICE_TO_PRIO(nice);
29 
30 	return prio;
31 }
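
/*
 * For illustration (using the standard mappings, where MAX_DL_PRIO == 0,
 * MAX_RT_PRIO == 100 and NICE_TO_PRIO(nice) == 120 + nice):
 *
 *	__normal_prio(SCHED_DEADLINE,  0,  0) ==  -1
 *	__normal_prio(SCHED_FIFO,     50,  0) ==  49
 *	__normal_prio(SCHED_NORMAL,    0,  0) == 120
 *	__normal_prio(SCHED_NORMAL,    0, 19) == 139
 */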
32 
33 /*
34  * Calculate the expected normal priority: i.e. priority
35  * without taking RT-inheritance into account. Might be
36  * boosted by interactivity modifiers. Changes upon fork,
37  * setprio syscalls, and whenever the interactivity
38  * estimator recalculates.
39  */
40 static inline int normal_prio(struct task_struct *p)
41 {
42 	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
43 }
44 
45 /*
46  * Calculate the current priority, i.e. the priority
47  * taken into account by the scheduler. This value might
48  * be boosted by RT tasks, or might be boosted by
49  * interactivity modifiers. Will be RT if the task got
50  * RT-boosted. If not then it returns p->normal_prio.
51  */
52 static int effective_prio(struct task_struct *p)
53 {
54 	p->normal_prio = normal_prio(p);
55 	/*
56 	 * If the task is RT or was boosted to RT priority, keep
57 	 * the priority unchanged. Otherwise, update the priority
58 	 * to the normal priority:
59 	 */
60 	if (!rt_prio(p->prio))
61 		return p->normal_prio;
62 	return p->prio;
63 }
64 
65 void set_user_nice(struct task_struct *p, long nice)
66 {
67 	bool queued, running;
68 	struct rq *rq;
69 	int old_prio;
70 
71 	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
72 		return;
73 	/*
74 	 * We have to be careful: if called from sys_setpriority(),
75 	 * the task might be in the middle of scheduling on another CPU.
76 	 */
77 	CLASS(task_rq_lock, rq_guard)(p);
78 	rq = rq_guard.rq;
79 
80 	update_rq_clock(rq);
81 
82 	/*
83 	 * The RT priorities are set via sched_setscheduler(), but we still
84 	 * allow the 'normal' nice value to be set - but as expected
85 	 * it won't have any effect on scheduling while the task is
86 	 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
87 	 */
88 	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
89 		p->static_prio = NICE_TO_PRIO(nice);
90 		return;
91 	}
92 
93 	queued = task_on_rq_queued(p);
94 	running = task_current(rq, p);
95 	if (queued)
96 		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
97 	if (running)
98 		put_prev_task(rq, p);
99 
100 	p->static_prio = NICE_TO_PRIO(nice);
101 	set_load_weight(p, true);
102 	old_prio = p->prio;
103 	p->prio = effective_prio(p);
104 
105 	if (queued)
106 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
107 	if (running)
108 		set_next_task(rq, p);
109 
110 	/*
111 	 * If the task increased its priority or is running and
112 	 * lowered its priority, then reschedule its CPU:
113 	 */
114 	p->sched_class->prio_changed(rq, p, old_prio);
115 }
116 EXPORT_SYMBOL(set_user_nice);
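
/*
 * A minimal usage sketch (hypothetical caller, not part of this file): a
 * kernel thread that wants to run as background work can lower its own
 * priority with e.g.:
 *
 *	set_user_nice(current, 10);
 *
 * which stores NICE_TO_PRIO(10) == 130 in p->static_prio above.
 */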
117 
118 /*
119  * is_nice_reduction - check if nice value is an actual reduction
120  * @p: task
121  * @nice: nice value
122  *
123  * Similar to can_nice() but does not perform a capability
124  * check.
125  */
126 static bool is_nice_reduction(const struct task_struct *p, const int nice)
127 {
128 	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
129 	int nice_rlim = nice_to_rlimit(nice);
130 
131 	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
132 }
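
/*
 * Example of the conversion used above: nice_to_rlimit() maps nice
 * [19, -20] onto [1, 40] as 20 - nice, so nice_to_rlimit(19) == 1,
 * nice_to_rlimit(0) == 20 and nice_to_rlimit(-20) == 40. A request is
 * thus a "reduction" iff RLIMIT_NICE >= 20 - nice.
 */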
133 
134 /*
135  * can_nice - check if a task can reduce its nice value
136  * @p: task
137  * @nice: nice value
138  */
139 int can_nice(const struct task_struct *p, const int nice)
140 {
141 	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
142 }
143 
144 #ifdef __ARCH_WANT_SYS_NICE
145 
146 /*
147  * sys_nice - change the priority of the current process.
148  * @increment: priority increment
149  *
150  * sys_setpriority is a more generic, but much slower function that
151  * does similar things.
152  */
153 SYSCALL_DEFINE1(nice, int, increment)
154 {
155 	long nice, retval;
156 
157 	/*
158 	 * Setpriority might change our priority at the same moment.
159 	 * We don't have to worry. Conceptually one call occurs first
160 	 * and we have a single winner.
161 	 */
162 	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
163 	nice = task_nice(current) + increment;
164 
165 	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
166 	if (increment < 0 && !can_nice(current, nice))
167 		return -EPERM;
168 
169 	retval = security_task_setnice(current, nice);
170 	if (retval)
171 		return retval;
172 
173 	set_user_nice(current, nice);
174 	return 0;
175 }
176 
177 #endif
178 
179 /**
180  * task_prio - return the priority value of a given task.
181  * @p: the task in question.
182  *
183  * Return: The priority value as seen by users in /proc.
184  *
185  * sched policy         return value   kernel prio    user prio/nice
186  *
187  * normal, batch, idle     [0 ... 39]  [100 ... 139]  0/[-20 ... 19]
188  * fifo, rr             [-2 ... -100]     [98 ... 0]      [1 ... 99]
189  * deadline                     -101             -1               0
190  */
191 int task_prio(const struct task_struct *p)
192 {
193 	return p->prio - MAX_RT_PRIO;
194 }
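
/*
 * Worked examples of the table above: a nice-0 SCHED_NORMAL task has
 * p->prio == 120 and reports 120 - MAX_RT_PRIO == 20, while a
 * SCHED_FIFO task at RT priority 50 has p->prio == 49 and reports -51.
 */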
195 
196 /**
197  * idle_cpu - is a given CPU idle currently?
198  * @cpu: the processor in question.
199  *
200  * Return: 1 if the CPU is currently idle. 0 otherwise.
201  */
202 int idle_cpu(int cpu)
203 {
204 	struct rq *rq = cpu_rq(cpu);
205 
206 	if (rq->curr != rq->idle)
207 		return 0;
208 
209 	if (rq->nr_running)
210 		return 0;
211 
212 #ifdef CONFIG_SMP
213 	if (rq->ttwu_pending)
214 		return 0;
215 #endif
216 
217 	return 1;
218 }
219 
220 /**
221  * available_idle_cpu - is a given CPU idle for enqueuing work.
222  * @cpu: the CPU in question.
223  *
224  * Return: 1 if the CPU is currently idle. 0 otherwise.
225  */
226 int available_idle_cpu(int cpu)
227 {
228 	if (!idle_cpu(cpu))
229 		return 0;
230 
231 	if (vcpu_is_preempted(cpu))
232 		return 0;
233 
234 	return 1;
235 }
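
/*
 * The vcpu_is_preempted() check above matters under virtualization: a
 * guest CPU can look idle (rq->curr == rq->idle) while its vCPU is
 * preempted by the host, in which case work queued there still has to
 * wait for the hypervisor to run the vCPU again.
 */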
236 
237 /**
238  * idle_task - return the idle task for a given CPU.
239  * @cpu: the processor in question.
240  *
241  * Return: The idle task for the CPU @cpu.
242  */
243 struct task_struct *idle_task(int cpu)
244 {
245 	return cpu_rq(cpu)->idle;
246 }
247 
248 #ifdef CONFIG_SCHED_CORE
249 int sched_core_idle_cpu(int cpu)
250 {
251 	struct rq *rq = cpu_rq(cpu);
252 
253 	if (sched_core_enabled(rq) && rq->curr == rq->idle)
254 		return 1;
255 
256 	return idle_cpu(cpu);
257 }
258 
259 #endif
260 
261 #ifdef CONFIG_SMP
262 /*
263  * Load avg and utilization metrics need to be updated periodically and before
264  * consumption. This function updates the metrics for all subsystems except for
265  * the fair class. @rq must be locked and have its clock updated.
266  */
267 bool update_other_load_avgs(struct rq *rq)
268 {
269 	u64 now = rq_clock_pelt(rq);
270 	const struct sched_class *curr_class = rq->curr->sched_class;
271 	unsigned long hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
272 
273 	lockdep_assert_rq_held(rq);
274 
275 	return update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
276 		update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
277 		update_hw_load_avg(now, rq, hw_pressure) |
278 		update_irq_load_avg(rq, 0);
279 }
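
/*
 * Note the bitwise '|' above: unlike '||' it does not short-circuit, so
 * every PELT signal is updated even when an earlier one already reported
 * a change; the return value only says whether any of them changed.
 */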
280 
281 /*
282  * This function computes an effective utilization for the given CPU, to be
283  * used for frequency selection given the linear relation: f = u * f_max.
284  *
285  * The scheduler tracks the following metrics:
286  *
287  *   cpu_util_{cfs,rt,dl,irq}()
288  *   cpu_bw_dl()
289  *
290  * Where the cfs,rt and dl util numbers are tracked with the same metric and
291  * synchronized windows and are thus directly comparable.
292  *
293  * The cfs,rt,dl utilization are the running times measured with rq->clock_task
294  * which excludes things like IRQ and steal-time. These latter are then accrued
295  * in the IRQ utilization.
296  *
297  * The DL bandwidth number OTOH is not a measured metric but a value computed
298  * based on the task model parameters and gives the minimal utilization
299  * required to meet deadlines.
300  */
301 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
302 				 unsigned long *min,
303 				 unsigned long *max)
304 {
305 	unsigned long util, irq, scale;
306 	struct rq *rq = cpu_rq(cpu);
307 
308 	scale = arch_scale_cpu_capacity(cpu);
309 
310 	/*
311 	 * Early check to see if IRQ/steal time saturates the CPU; this can
312 	 * happen because of inaccuracies in how we track these -- see
313 	 * update_irq_load_avg().
314 	 */
315 	irq = cpu_util_irq(rq);
316 	if (unlikely(irq >= scale)) {
317 		if (min)
318 			*min = scale;
319 		if (max)
320 			*max = scale;
321 		return scale;
322 	}
323 
324 	if (min) {
325 		/*
326 		 * The minimum utilization is the highest level between:
327 		 * - the computed DL bandwidth needed with the IRQ pressure which
328 		 *   steals time from the deadline task.
329 		 * - The minimum performance requirement for CFS and/or RT.
330 		 */
331 		*min = max(irq + cpu_bw_dl(rq), uclamp_rq_get(rq, UCLAMP_MIN));
332 
333 		/*
334 		 * When an RT task is runnable and uclamp is not used, we must
335 		 * ensure that the task will run at maximum compute capacity.
336 		 */
337 		if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt))
338 			*min = max(*min, scale);
339 	}
340 
341 	/*
342 	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
343 	 * CFS tasks and we use the same metric to track the effective
344 	 * utilization (PELT windows are synchronized) we can directly add them
345 	 * to obtain the CPU's actual utilization.
346 	 */
347 	util = util_cfs + cpu_util_rt(rq);
348 	util += cpu_util_dl(rq);
349 
350 	/*
351 	 * The maximum hint is a soft bandwidth requirement, which can be lower
352 	 * than the actual utilization because of uclamp_max requirements.
353 	 */
354 	if (max)
355 		*max = min(scale, uclamp_rq_get(rq, UCLAMP_MAX));
356 
357 	if (util >= scale)
358 		return scale;
359 
360 	/*
361 	 * There is still idle time; further improve the number by using the
362 	 * IRQ metric. Because IRQ/steal time is hidden from the task clock we
363 	 * need to scale the task numbers:
364 	 *
365 	 *              max - irq
366 	 *   U' = irq + --------- * U
367 	 *                 max
368 	 */
369 	util = scale_irq_capacity(util, irq, scale);
370 	util += irq;
371 
372 	return min(scale, util);
373 }
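
/*
 * Worked example of the IRQ scaling above (made-up numbers): with
 * scale == 1024, irq == 256 and util == 512, the non-IRQ fraction is
 * (1024 - 256) / 1024 == 3/4, so:
 *
 *	U' = 256 + 3/4 * 512 = 640
 *
 * i.e. scale_irq_capacity() compresses the task-clock utilization into
 * the capacity left over after IRQ/steal time.
 */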
374 
375 unsigned long sched_cpu_util(int cpu)
376 {
377 	return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL);
378 }
379 #endif /* CONFIG_SMP */
380 
381 /**
382  * find_process_by_pid - find a process with a matching PID value.
383  * @pid: the pid in question.
384  *
385  * Return: the task of @pid, if found. %NULL otherwise.
386  */
387 static struct task_struct *find_process_by_pid(pid_t pid)
388 {
389 	return pid ? find_task_by_vpid(pid) : current;
390 }
391 
392 static struct task_struct *find_get_task(pid_t pid)
393 {
394 	struct task_struct *p;
395 	guard(rcu)();
396 
397 	p = find_process_by_pid(pid);
398 	if (likely(p))
399 		get_task_struct(p);
400 
401 	return p;
402 }
403 
404 DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
405 	     find_get_task(pid), pid_t pid)
406 
407 /*
408  * sched_setparam() passes in -1 for its policy, to let the functions
409  * it calls know not to change it.
410  */
411 #define SETPARAM_POLICY	-1
412 
413 static void __setscheduler_params(struct task_struct *p,
414 		const struct sched_attr *attr)
415 {
416 	int policy = attr->sched_policy;
417 
418 	if (policy == SETPARAM_POLICY)
419 		policy = p->policy;
420 
421 	p->policy = policy;
422 
423 	if (dl_policy(policy))
424 		__setparam_dl(p, attr);
425 	else if (fair_policy(policy))
426 		p->static_prio = NICE_TO_PRIO(attr->sched_nice);
427 
428 	/*
429 	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
430 	 * !rt_policy. Always setting this ensures that things like
431 	 * getparam()/getattr() don't report silly values for !rt tasks.
432 	 */
433 	p->rt_priority = attr->sched_priority;
434 	p->normal_prio = normal_prio(p);
435 	set_load_weight(p, true);
436 }
437 
438 /*
439  * Check that the target process has a UID matching the current process's:
440  */
441 static bool check_same_owner(struct task_struct *p)
442 {
443 	const struct cred *cred = current_cred(), *pcred;
444 	guard(rcu)();
445 
446 	pcred = __task_cred(p);
447 	return (uid_eq(cred->euid, pcred->euid) ||
448 		uid_eq(cred->euid, pcred->uid));
449 }
450 
451 #ifdef CONFIG_UCLAMP_TASK
452 
453 static int uclamp_validate(struct task_struct *p,
454 			   const struct sched_attr *attr)
455 {
456 	int util_min = p->uclamp_req[UCLAMP_MIN].value;
457 	int util_max = p->uclamp_req[UCLAMP_MAX].value;
458 
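	/*
	 * attr->sched_util_{min,max} are u32s; the "+ 1" in the checks
	 * below lets the special reset value -1 (see uclamp_reset()) pass
	 * the range check, since -1 + 1 == 0, while still rejecting
	 * values above SCHED_CAPACITY_SCALE.
	 */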
459 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
460 		util_min = attr->sched_util_min;
461 
462 		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
463 			return -EINVAL;
464 	}
465 
466 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
467 		util_max = attr->sched_util_max;
468 
469 		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
470 			return -EINVAL;
471 	}
472 
473 	if (util_min != -1 && util_max != -1 && util_min > util_max)
474 		return -EINVAL;
475 
476 	/*
477 	 * We have valid uclamp attributes; make sure uclamp is enabled.
478 	 *
479 	 * We need to do that here, because enabling static branches is a
480 	 * blocking operation which obviously cannot be done while holding
481 	 * scheduler locks.
482 	 */
483 	static_branch_enable(&sched_uclamp_used);
484 
485 	return 0;
486 }
487 
488 static bool uclamp_reset(const struct sched_attr *attr,
489 			 enum uclamp_id clamp_id,
490 			 struct uclamp_se *uc_se)
491 {
492 	/* Reset on sched class change for a non-user-defined clamp value. */
493 	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
494 	    !uc_se->user_defined)
495 		return true;
496 
497 	/* Reset on sched_util_{min,max} == -1. */
498 	if (clamp_id == UCLAMP_MIN &&
499 	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
500 	    attr->sched_util_min == -1) {
501 		return true;
502 	}
503 
504 	if (clamp_id == UCLAMP_MAX &&
505 	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
506 	    attr->sched_util_max == -1) {
507 		return true;
508 	}
509 
510 	return false;
511 }
512 
513 static void __setscheduler_uclamp(struct task_struct *p,
514 				  const struct sched_attr *attr)
515 {
516 	enum uclamp_id clamp_id;
517 
518 	for_each_clamp_id(clamp_id) {
519 		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
520 		unsigned int value;
521 
522 		if (!uclamp_reset(attr, clamp_id, uc_se))
523 			continue;
524 
525 		/*
526 		 * RT tasks by default have a 100% boost value that can be modified
527 		 * at runtime.
528 		 */
529 		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
530 			value = sysctl_sched_uclamp_util_min_rt_default;
531 		else
532 			value = uclamp_none(clamp_id);
533 
534 		uclamp_se_set(uc_se, value, false);
535 
536 	}
537 
538 	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
539 		return;
540 
541 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
542 	    attr->sched_util_min != -1) {
543 		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
544 			      attr->sched_util_min, true);
545 	}
546 
547 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
548 	    attr->sched_util_max != -1) {
549 		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
550 			      attr->sched_util_max, true);
551 	}
552 }
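
/*
 * For illustration, a hypothetical user-space request that clamps a
 * task's utilization into [128, 512] would look roughly like:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_NORMAL,
 *		.sched_flags	= SCHED_FLAG_UTIL_CLAMP_MIN |
 *				  SCHED_FLAG_UTIL_CLAMP_MAX,
 *		.sched_util_min	= 128,
 *		.sched_util_max	= 512,
 *	};
 *
 * passed to the sched_setattr() syscall below.
 */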
553 
554 #else /* !CONFIG_UCLAMP_TASK: */
555 
556 static inline int uclamp_validate(struct task_struct *p,
557 				  const struct sched_attr *attr)
558 {
559 	return -EOPNOTSUPP;
560 }
561 static void __setscheduler_uclamp(struct task_struct *p,
562 				  const struct sched_attr *attr) { }
563 #endif
564 
565 /*
566  * Allow unprivileged RT tasks to decrease priority.
567  * Only issue a capable test if needed and only once to avoid an audit
568  * event on permitted non-privileged operations:
569  */
570 static int user_check_sched_setscheduler(struct task_struct *p,
571 					 const struct sched_attr *attr,
572 					 int policy, int reset_on_fork)
573 {
574 	if (fair_policy(policy)) {
575 		if (attr->sched_nice < task_nice(p) &&
576 		    !is_nice_reduction(p, attr->sched_nice))
577 			goto req_priv;
578 	}
579 
580 	if (rt_policy(policy)) {
581 		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
582 
583 		/* Can't set/change the rt policy: */
584 		if (policy != p->policy && !rlim_rtprio)
585 			goto req_priv;
586 
587 		/* Can't increase priority: */
588 		if (attr->sched_priority > p->rt_priority &&
589 		    attr->sched_priority > rlim_rtprio)
590 			goto req_priv;
591 	}
592 
593 	/*
594 	 * Can't set/change SCHED_DEADLINE policy at all for now
595 	 * (safest behavior); in the future we would like to allow
596 	 * unprivileged DL tasks to increase their relative deadline
597 	 * or reduce their runtime (both ways reducing utilization)
598 	 */
599 	if (dl_policy(policy))
600 		goto req_priv;
601 
602 	/*
603 	 * Treat SCHED_IDLE as nice 20. Only allow a switch to
604 	 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
605 	 */
606 	if (task_has_idle_policy(p) && !idle_policy(policy)) {
607 		if (!is_nice_reduction(p, task_nice(p)))
608 			goto req_priv;
609 	}
610 
611 	/* Can't change other user's priorities: */
612 	if (!check_same_owner(p))
613 		goto req_priv;
614 
615 	/* Normal users shall not reset the sched_reset_on_fork flag: */
616 	if (p->sched_reset_on_fork && !reset_on_fork)
617 		goto req_priv;
618 
619 	return 0;
620 
621 req_priv:
622 	if (!capable(CAP_SYS_NICE))
623 		return -EPERM;
624 
625 	return 0;
626 }
627 
628 int __sched_setscheduler(struct task_struct *p,
629 			 const struct sched_attr *attr,
630 			 bool user, bool pi)
631 {
632 	int oldpolicy = -1, policy = attr->sched_policy;
633 	int retval, oldprio, newprio, queued, running;
634 	const struct sched_class *prev_class;
635 	struct balance_callback *head;
636 	struct rq_flags rf;
637 	int reset_on_fork;
638 	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
639 	struct rq *rq;
640 	bool cpuset_locked = false;
641 
642 	/* The pi code expects interrupts enabled */
643 	BUG_ON(pi && in_interrupt());
644 recheck:
645 	/* Double check policy once rq lock held: */
646 	if (policy < 0) {
647 		reset_on_fork = p->sched_reset_on_fork;
648 		policy = oldpolicy = p->policy;
649 	} else {
650 		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
651 
652 		if (!valid_policy(policy))
653 			return -EINVAL;
654 	}
655 
656 	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
657 		return -EINVAL;
658 
659 	/*
660 	 * Valid priorities for SCHED_FIFO and SCHED_RR are
661 	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
662 	 * SCHED_BATCH and SCHED_IDLE is 0.
663 	 */
664 	if (attr->sched_priority > MAX_RT_PRIO-1)
665 		return -EINVAL;
666 	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
667 	    (rt_policy(policy) != (attr->sched_priority != 0)))
668 		return -EINVAL;
669 
670 	if (user) {
671 		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
672 		if (retval)
673 			return retval;
674 
675 		if (attr->sched_flags & SCHED_FLAG_SUGOV)
676 			return -EINVAL;
677 
678 		retval = security_task_setscheduler(p);
679 		if (retval)
680 			return retval;
681 	}
682 
683 	/* Update task specific "requested" clamps */
684 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
685 		retval = uclamp_validate(p, attr);
686 		if (retval)
687 			return retval;
688 	}
689 
690 	/*
691 	 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
692 	 * information.
693 	 */
694 	if (dl_policy(policy) || dl_policy(p->policy)) {
695 		cpuset_locked = true;
696 		cpuset_lock();
697 	}
698 
699 	/*
700 	 * Make sure no PI-waiters arrive (or leave) while we are
701 	 * changing the priority of the task:
702 	 *
703 	 * To be able to change p->policy safely, the appropriate
704 	 * runqueue lock must be held.
705 	 */
706 	rq = task_rq_lock(p, &rf);
707 	update_rq_clock(rq);
708 
709 	/*
710 	 * Changing the policy of the stop threads is a very bad idea:
711 	 */
712 	if (p == rq->stop) {
713 		retval = -EINVAL;
714 		goto unlock;
715 	}
716 
717 	/*
718 	 * If not changing anything there's no need to proceed further,
719 	 * but store a possible modification of reset_on_fork.
720 	 */
721 	if (unlikely(policy == p->policy)) {
722 		if (fair_policy(policy) && attr->sched_nice != task_nice(p))
723 			goto change;
724 		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
725 			goto change;
726 		if (dl_policy(policy) && dl_param_changed(p, attr))
727 			goto change;
728 		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
729 			goto change;
730 
731 		p->sched_reset_on_fork = reset_on_fork;
732 		retval = 0;
733 		goto unlock;
734 	}
735 change:
736 
737 	if (user) {
738 #ifdef CONFIG_RT_GROUP_SCHED
739 		/*
740 		 * Do not allow real-time tasks into groups that have no runtime
741 		 * assigned.
742 		 */
743 		if (rt_bandwidth_enabled() && rt_policy(policy) &&
744 				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
745 				!task_group_is_autogroup(task_group(p))) {
746 			retval = -EPERM;
747 			goto unlock;
748 		}
749 #endif
750 #ifdef CONFIG_SMP
751 		if (dl_bandwidth_enabled() && dl_policy(policy) &&
752 				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
753 			cpumask_t *span = rq->rd->span;
754 
755 			/*
756 			 * Don't allow tasks with an affinity mask smaller than
757 			 * the entire root_domain to become SCHED_DEADLINE. We
758 			 * will also fail if there's no bandwidth available.
759 			 */
760 			if (!cpumask_subset(span, p->cpus_ptr) ||
761 			    rq->rd->dl_bw.bw == 0) {
762 				retval = -EPERM;
763 				goto unlock;
764 			}
765 		}
766 #endif
767 	}
768 
769 	/* Re-check policy now with rq lock held: */
770 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
771 		policy = oldpolicy = -1;
772 		task_rq_unlock(rq, p, &rf);
773 		if (cpuset_locked)
774 			cpuset_unlock();
775 		goto recheck;
776 	}
777 
778 	/*
779 	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
780 	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
781 	 * is available.
782 	 */
783 	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
784 		retval = -EBUSY;
785 		goto unlock;
786 	}
787 
788 	p->sched_reset_on_fork = reset_on_fork;
789 	oldprio = p->prio;
790 
791 	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
792 	if (pi) {
793 		/*
794 		 * Take priority boosted tasks into account. If the new
795 		 * effective priority is unchanged, we just store the new
796 		 * normal parameters and do not touch the scheduler class and
797 		 * the runqueue. This will be done when the task deboosts
798 		 * itself.
799 		 */
800 		newprio = rt_effective_prio(p, newprio);
801 		if (newprio == oldprio)
802 			queue_flags &= ~DEQUEUE_MOVE;
803 	}
804 
805 	queued = task_on_rq_queued(p);
806 	running = task_current(rq, p);
807 	if (queued)
808 		dequeue_task(rq, p, queue_flags);
809 	if (running)
810 		put_prev_task(rq, p);
811 
812 	prev_class = p->sched_class;
813 
814 	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
815 		__setscheduler_params(p, attr);
816 		__setscheduler_prio(p, newprio);
817 	}
818 	__setscheduler_uclamp(p, attr);
819 	check_class_changing(rq, p, prev_class);
820 
821 	if (queued) {
822 		/*
823 		 * We enqueue to tail when the priority of a task is
824 		 * increased (user space view), and to head when it is lowered.
825 		 */
826 		if (oldprio < p->prio)
827 			queue_flags |= ENQUEUE_HEAD;
828 
829 		enqueue_task(rq, p, queue_flags);
830 	}
831 	if (running)
832 		set_next_task(rq, p);
833 
834 	check_class_changed(rq, p, prev_class, oldprio);
835 
836 	/* Avoid rq from going away on us: */
837 	preempt_disable();
838 	head = splice_balance_callbacks(rq);
839 	task_rq_unlock(rq, p, &rf);
840 
841 	if (pi) {
842 		if (cpuset_locked)
843 			cpuset_unlock();
844 		rt_mutex_adjust_pi(p);
845 	}
846 
847 	/* Run balance callbacks after we've adjusted the PI chain: */
848 	balance_callbacks(rq, head);
849 	preempt_enable();
850 
851 	return 0;
852 
853 unlock:
854 	task_rq_unlock(rq, p, &rf);
855 	if (cpuset_locked)
856 		cpuset_unlock();
857 	return retval;
858 }
859 
860 static int _sched_setscheduler(struct task_struct *p, int policy,
861 			       const struct sched_param *param, bool check)
862 {
863 	struct sched_attr attr = {
864 		.sched_policy   = policy,
865 		.sched_priority = param->sched_priority,
866 		.sched_nice	= PRIO_TO_NICE(p->static_prio),
867 	};
868 
869 	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
870 	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
871 		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
872 		policy &= ~SCHED_RESET_ON_FORK;
873 		attr.sched_policy = policy;
874 	}
875 
876 	return __sched_setscheduler(p, &attr, check, true);
877 }
878 /**
879  * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
880  * @p: the task in question.
881  * @policy: new policy.
882  * @param: structure containing the new RT priority.
883  *
884  * Use sched_set_fifo(), read its comment.
885  *
886  * Return: 0 on success. An error code otherwise.
887  *
888  * NOTE that the task may be already dead.
889  */
890 int sched_setscheduler(struct task_struct *p, int policy,
891 		       const struct sched_param *param)
892 {
893 	return _sched_setscheduler(p, policy, param, true);
894 }
895 
896 int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
897 {
898 	return __sched_setscheduler(p, attr, true, true);
899 }
900 
901 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
902 {
903 	return __sched_setscheduler(p, attr, false, true);
904 }
905 EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
906 
907 /**
908  * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernel-space.
909  * @p: the task in question.
910  * @policy: new policy.
911  * @param: structure containing the new RT priority.
912  *
913  * Just like sched_setscheduler, only don't bother checking if the
914  * current context has permission.  For example, this is needed in
915  * stop_machine(): we create temporary high priority worker threads,
916  * but our caller might not have that capability.
917  *
918  * Return: 0 on success. An error code otherwise.
919  */
920 int sched_setscheduler_nocheck(struct task_struct *p, int policy,
921 			       const struct sched_param *param)
922 {
923 	return _sched_setscheduler(p, policy, param, false);
924 }
925 
926 /*
927  * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
928  * incapable of resource management, which is the one thing an OS really should
929  * be doing.
930  *
931  * This is of course the reason it is limited to privileged users only.
932  *
933  * Worse still, it is fundamentally impossible to compose static priority
934  * workloads. You cannot take two correctly working static prio workloads
935  * and smash them together and still expect them to work.
936  *
937  * For this reason 'all' FIFO tasks the kernel creates are basically at:
938  *
939  *   MAX_RT_PRIO / 2
940  *
941  * The administrator _MUST_ configure the system; the kernel simply doesn't
942  * have enough information to make a sensible choice.
943  */
944 void sched_set_fifo(struct task_struct *p)
945 {
946 	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
947 	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
948 }
949 EXPORT_SYMBOL_GPL(sched_set_fifo);
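
/*
 * A minimal usage sketch (hypothetical driver code): a subsystem that
 * needs a real-time worker would do e.g.:
 *
 *	struct task_struct *t = kthread_run(fn, NULL, "my-rt-worker");
 *	if (!IS_ERR(t))
 *		sched_set_fifo(t);
 *
 * and leave the exact RT priority placement to the administrator.
 */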
950 
951 /*
952  * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
953  */
954 void sched_set_fifo_low(struct task_struct *p)
955 {
956 	struct sched_param sp = { .sched_priority = 1 };
957 	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
958 }
959 EXPORT_SYMBOL_GPL(sched_set_fifo_low);
960 
961 void sched_set_normal(struct task_struct *p, int nice)
962 {
963 	struct sched_attr attr = {
964 		.sched_policy = SCHED_NORMAL,
965 		.sched_nice = nice,
966 	};
967 	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
968 }
969 EXPORT_SYMBOL_GPL(sched_set_normal);
970 
971 static int
972 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
973 {
974 	struct sched_param lparam;
975 
976 	if (!param || pid < 0)
977 		return -EINVAL;
978 	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
979 		return -EFAULT;
980 
981 	CLASS(find_get_task, p)(pid);
982 	if (!p)
983 		return -ESRCH;
984 
985 	return sched_setscheduler(p, policy, &lparam);
986 }
987 
988 /*
989  * Mimics kernel/events/core.c perf_copy_attr().
990  */
991 static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
992 {
993 	u32 size;
994 	int ret;
995 
996 	/* Zero the full structure, so that a short copy leaves the rest zeroed: */
997 	memset(attr, 0, sizeof(*attr));
998 
999 	ret = get_user(size, &uattr->size);
1000 	if (ret)
1001 		return ret;
1002 
1003 	/* ABI compatibility quirk: */
1004 	if (!size)
1005 		size = SCHED_ATTR_SIZE_VER0;
1006 	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
1007 		goto err_size;
1008 
1009 	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
1010 	if (ret) {
1011 		if (ret == -E2BIG)
1012 			goto err_size;
1013 		return ret;
1014 	}
1015 
1016 	if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
1017 	    size < SCHED_ATTR_SIZE_VER1)
1018 		return -EINVAL;
1019 
1020 	/*
1021 	 * XXX: Do we want to be lenient like existing syscalls; or do we want
1022 	 * to be strict and return an error on out-of-bounds values?
1023 	 */
1024 	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
1025 
1026 	return 0;
1027 
1028 err_size:
1029 	put_user(sizeof(*attr), &uattr->size);
1030 	return -E2BIG;
1031 }
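
/*
 * Example of the compat rules above: a binary built against the original
 * ABI passes size == SCHED_ATTR_SIZE_VER0 (48 bytes); only those bytes
 * are copied and the memset() keeps the newer fields (e.g. the uclamp
 * ones) zeroed. A size above PAGE_SIZE, or a non-zero byte beyond
 * sizeof(struct sched_attr), yields -E2BIG with uattr->size set to what
 * this kernel actually supports.
 */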
1032 
1033 static void get_params(struct task_struct *p, struct sched_attr *attr)
1034 {
1035 	if (task_has_dl_policy(p))
1036 		__getparam_dl(p, attr);
1037 	else if (task_has_rt_policy(p))
1038 		attr->sched_priority = p->rt_priority;
1039 	else
1040 		attr->sched_nice = task_nice(p);
1041 }
1042 
1043 /**
1044  * sys_sched_setscheduler - set/change the scheduler policy and RT priority
1045  * @pid: the pid in question.
1046  * @policy: new policy.
1047  * @param: structure containing the new RT priority.
1048  *
1049  * Return: 0 on success. An error code otherwise.
1050  */
1051 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
1052 {
1053 	if (policy < 0)
1054 		return -EINVAL;
1055 
1056 	return do_sched_setscheduler(pid, policy, param);
1057 }
1058 
1059 /**
1060  * sys_sched_setparam - set/change the RT priority of a thread
1061  * @pid: the pid in question.
1062  * @param: structure containing the new RT priority.
1063  *
1064  * Return: 0 on success. An error code otherwise.
1065  */
1066 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
1067 {
1068 	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
1069 }
1070 
1071 /**
1072  * sys_sched_setattr - same as above, but with extended sched_attr
1073  * @pid: the pid in question.
1074  * @uattr: structure containing the extended parameters.
1075  * @flags: for future extension.
1076  */
1077 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
1078 			       unsigned int, flags)
1079 {
1080 	struct sched_attr attr;
1081 	int retval;
1082 
1083 	if (!uattr || pid < 0 || flags)
1084 		return -EINVAL;
1085 
1086 	retval = sched_copy_attr(uattr, &attr);
1087 	if (retval)
1088 		return retval;
1089 
1090 	if ((int)attr.sched_policy < 0)
1091 		return -EINVAL;
1092 	if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
1093 		attr.sched_policy = SETPARAM_POLICY;
1094 
1095 	CLASS(find_get_task, p)(pid);
1096 	if (!p)
1097 		return -ESRCH;
1098 
1099 	if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
1100 		get_params(p, &attr);
1101 
1102 	return sched_setattr(p, &attr);
1103 }
1104 
1105 /**
1106  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
1107  * @pid: the pid in question.
1108  *
1109  * Return: On success, the policy of the thread. Otherwise, a negative error
1110  * code.
1111  */
1112 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
1113 {
1114 	struct task_struct *p;
1115 	int retval;
1116 
1117 	if (pid < 0)
1118 		return -EINVAL;
1119 
1120 	guard(rcu)();
1121 	p = find_process_by_pid(pid);
1122 	if (!p)
1123 		return -ESRCH;
1124 
1125 	retval = security_task_getscheduler(p);
1126 	if (!retval) {
1127 		retval = p->policy;
1128 		if (p->sched_reset_on_fork)
1129 			retval |= SCHED_RESET_ON_FORK;
1130 	}
1131 	return retval;
1132 }
1133 
1134 /**
1135  * sys_sched_getparam - get the RT priority of a thread
1136  * @pid: the pid in question.
1137  * @param: structure containing the RT priority.
1138  *
1139  * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
1140  * code.
1141  */
1142 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
1143 {
1144 	struct sched_param lp = { .sched_priority = 0 };
1145 	struct task_struct *p;
1146 	int retval;
1147 
1148 	if (!param || pid < 0)
1149 		return -EINVAL;
1150 
1151 	scoped_guard (rcu) {
1152 		p = find_process_by_pid(pid);
1153 		if (!p)
1154 			return -ESRCH;
1155 
1156 		retval = security_task_getscheduler(p);
1157 		if (retval)
1158 			return retval;
1159 
1160 		if (task_has_rt_policy(p))
1161 			lp.sched_priority = p->rt_priority;
1162 	}
1163 
1164 	/*
1165 	 * This one might sleep; we cannot do it with a spinlock held ...
1166 	 */
1167 	return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
1168 }
1169 
1170 /*
1171  * Copy the kernel-sized attribute structure (which might be larger
1172  * than what user-space knows about) to user-space.
1173  *
1174  * Note that all cases are valid: user-space buffer can be larger or
1175  * smaller than the kernel-space buffer. The usual case is that both
1176  * have the same size.
1177  */
1178 static int
1179 sched_attr_copy_to_user(struct sched_attr __user *uattr,
1180 			struct sched_attr *kattr,
1181 			unsigned int usize)
1182 {
1183 	unsigned int ksize = sizeof(*kattr);
1184 
1185 	if (!access_ok(uattr, usize))
1186 		return -EFAULT;
1187 
1188 	/*
1189 	 * sched_getattr() ABI forwards and backwards compatibility:
1190 	 *
1191 	 * If usize == ksize then we just copy everything to user-space and all is good.
1192 	 *
1193 	 * If usize < ksize then we only copy as much as user-space has space for,
1194 	 * this keeps ABI compatibility as well. We skip the rest.
1195 	 *
1196 	 * If usize > ksize then user-space is using a newer version of the ABI,
1197  * parts of which the kernel doesn't know about. Just ignore them - tooling can
1198 	 * detect the kernel's knowledge of attributes from the attr->size value
1199 	 * which is set to ksize in this case.
1200 	 */
1201 	kattr->size = min(usize, ksize);
1202 
1203 	if (copy_to_user(uattr, kattr, kattr->size))
1204 		return -EFAULT;
1205 
1206 	return 0;
1207 }
1208 
1209 /**
1210  * sys_sched_getattr - similar to sched_getparam, but with sched_attr
1211  * @pid: the pid in question.
1212  * @uattr: structure containing the extended parameters.
1213  * @usize: sizeof(attr) for fwd/bwd comp.
1214  * @flags: for future extension.
1215  */
1216 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
1217 		unsigned int, usize, unsigned int, flags)
1218 {
1219 	struct sched_attr kattr = { };
1220 	struct task_struct *p;
1221 	int retval;
1222 
1223 	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
1224 	    usize < SCHED_ATTR_SIZE_VER0 || flags)
1225 		return -EINVAL;
1226 
1227 	scoped_guard (rcu) {
1228 		p = find_process_by_pid(pid);
1229 		if (!p)
1230 			return -ESRCH;
1231 
1232 		retval = security_task_getscheduler(p);
1233 		if (retval)
1234 			return retval;
1235 
1236 		kattr.sched_policy = p->policy;
1237 		if (p->sched_reset_on_fork)
1238 			kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
1239 		get_params(p, &kattr);
1240 		kattr.sched_flags &= SCHED_FLAG_ALL;
1241 
1242 #ifdef CONFIG_UCLAMP_TASK
1243 		/*
1244 		 * This could race with another potential updater, but this is fine
1245 		 * because it'll correctly read the old or the new value. We don't need
1246 		 * to guarantee who wins the race as long as it doesn't return garbage.
1247 		 */
1248 		kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
1249 		kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
1250 #endif
1251 	}
1252 
1253 	return sched_attr_copy_to_user(uattr, &kattr, usize);
1254 }
1255 
1256 #ifdef CONFIG_SMP
1257 int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
1258 {
1259 	/*
1260 	 * If the task isn't a deadline task or admission control is
1261 	 * disabled then we don't care about affinity changes.
1262 	 */
1263 	if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
1264 		return 0;
1265 
1266 	/*
1267 	 * Since bandwidth control happens on a root_domain basis,
1268 	 * if the admission test is enabled, we only admit -deadline
1269 	 * tasks allowed to run on all the CPUs in the task's
1270 	 * root_domain.
1271 	 */
1272 	guard(rcu)();
1273 	if (!cpumask_subset(task_rq(p)->rd->span, mask))
1274 		return -EBUSY;
1275 
1276 	return 0;
1277 }
1278 #endif /* CONFIG_SMP */
1279 
1280 int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
1281 {
1282 	int retval;
1283 	cpumask_var_t cpus_allowed, new_mask;
1284 
1285 	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
1286 		return -ENOMEM;
1287 
1288 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
1289 		retval = -ENOMEM;
1290 		goto out_free_cpus_allowed;
1291 	}
1292 
1293 	cpuset_cpus_allowed(p, cpus_allowed);
1294 	cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
1295 
1296 	ctx->new_mask = new_mask;
1297 	ctx->flags |= SCA_CHECK;
1298 
1299 	retval = dl_task_check_affinity(p, new_mask);
1300 	if (retval)
1301 		goto out_free_new_mask;
1302 
1303 	retval = __set_cpus_allowed_ptr(p, ctx);
1304 	if (retval)
1305 		goto out_free_new_mask;
1306 
1307 	cpuset_cpus_allowed(p, cpus_allowed);
1308 	if (!cpumask_subset(new_mask, cpus_allowed)) {
1309 		/*
1310 		 * We must have raced with a concurrent cpuset update.
1311 		 * Just reset the cpumask to the cpuset's cpus_allowed.
1312 		 */
1313 		cpumask_copy(new_mask, cpus_allowed);
1314 
1315 		/*
1316 		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
1317 		 * will restore the previous user_cpus_ptr value.
1318 		 *
1319 		 * In the unlikely event a previous user_cpus_ptr exists,
1320 		 * we need to further restrict the mask to what is allowed
1321 		 * by that old user_cpus_ptr.
1322 		 */
1323 		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
1324 			bool empty = !cpumask_and(new_mask, new_mask,
1325 						  ctx->user_mask);
1326 
1327 			if (WARN_ON_ONCE(empty))
1328 				cpumask_copy(new_mask, cpus_allowed);
1329 		}
1330 		__set_cpus_allowed_ptr(p, ctx);
1331 		retval = -EINVAL;
1332 	}
1333 
1334 out_free_new_mask:
1335 	free_cpumask_var(new_mask);
1336 out_free_cpus_allowed:
1337 	free_cpumask_var(cpus_allowed);
1338 	return retval;
1339 }
1340 
1341 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
1342 {
1343 	struct affinity_context ac;
1344 	struct cpumask *user_mask;
1345 	int retval;
1346 
1347 	CLASS(find_get_task, p)(pid);
1348 	if (!p)
1349 		return -ESRCH;
1350 
1351 	if (p->flags & PF_NO_SETAFFINITY)
1352 		return -EINVAL;
1353 
1354 	if (!check_same_owner(p)) {
1355 		guard(rcu)();
1356 		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
1357 			return -EPERM;
1358 	}
1359 
1360 	retval = security_task_setscheduler(p);
1361 	if (retval)
1362 		return retval;
1363 
1364 	/*
1365 	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
1366 	 * alloc_user_cpus_ptr() returns NULL.
1367 	 */
1368 	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
1369 	if (user_mask) {
1370 		cpumask_copy(user_mask, in_mask);
1371 	} else if (IS_ENABLED(CONFIG_SMP)) {
1372 		return -ENOMEM;
1373 	}
1374 
1375 	ac = (struct affinity_context){
1376 		.new_mask  = in_mask,
1377 		.user_mask = user_mask,
1378 		.flags     = SCA_USER,
1379 	};
1380 
1381 	retval = __sched_setaffinity(p, &ac);
1382 	kfree(ac.user_mask);
1383 
1384 	return retval;
1385 }
1386 
1387 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
1388 			     struct cpumask *new_mask)
1389 {
1390 	if (len < cpumask_size())
1391 		cpumask_clear(new_mask);
1392 	else if (len > cpumask_size())
1393 		len = cpumask_size();
1394 
1395 	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
1396 }
1397 
1398 /**
1399  * sys_sched_setaffinity - set the CPU affinity of a process
1400  * @pid: pid of the process
1401  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
1402  * @user_mask_ptr: user-space pointer to the new CPU mask
1403  *
1404  * Return: 0 on success. An error code otherwise.
1405  */
1406 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
1407 		unsigned long __user *, user_mask_ptr)
1408 {
1409 	cpumask_var_t new_mask;
1410 	int retval;
1411 
1412 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
1413 		return -ENOMEM;
1414 
1415 	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
1416 	if (retval == 0)
1417 		retval = sched_setaffinity(pid, new_mask);
1418 	free_cpumask_var(new_mask);
1419 	return retval;
1420 }
1421 
1422 long sched_getaffinity(pid_t pid, struct cpumask *mask)
1423 {
1424 	struct task_struct *p;
1425 	int retval;
1426 
1427 	guard(rcu)();
1428 	p = find_process_by_pid(pid);
1429 	if (!p)
1430 		return -ESRCH;
1431 
1432 	retval = security_task_getscheduler(p);
1433 	if (retval)
1434 		return retval;
1435 
1436 	guard(raw_spinlock_irqsave)(&p->pi_lock);
1437 	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
1438 
1439 	return 0;
1440 }
1441 
1442 /**
1443  * sys_sched_getaffinity - get the CPU affinity of a process
1444  * @pid: pid of the process
1445  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
1446  * @user_mask_ptr: user-space pointer to hold the current CPU mask
1447  *
1448  * Return: size of CPU mask copied to user_mask_ptr on success. An
1449  * error code otherwise.
1450  */
1451 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
1452 		unsigned long __user *, user_mask_ptr)
1453 {
1454 	int ret;
1455 	cpumask_var_t mask;
1456 
1457 	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
1458 		return -EINVAL;
1459 	if (len & (sizeof(unsigned long)-1))
1460 		return -EINVAL;
1461 
1462 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
1463 		return -ENOMEM;
1464 
1465 	ret = sched_getaffinity(pid, mask);
1466 	if (ret == 0) {
1467 		unsigned int retlen = min(len, cpumask_size());
1468 
1469 		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
1470 			ret = -EFAULT;
1471 		else
1472 			ret = retlen;
1473 	}
1474 	free_cpumask_var(mask);
1475 
1476 	return ret;
1477 }
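
/*
 * Note the return convention above: the raw syscall returns the number
 * of bytes copied (min(len, cpumask_size())), not 0. A hypothetical
 * user-space sketch:
 *
 *	unsigned long mask[16];
 *	long n = syscall(SYS_sched_getaffinity, 0, sizeof(mask), mask);
 *
 * glibc's sched_getaffinity() wrapper hides this by zeroing the bytes
 * past n and returning 0 on success.
 */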
1478 
1479 static void do_sched_yield(void)
1480 {
1481 	struct rq_flags rf;
1482 	struct rq *rq;
1483 
1484 	rq = this_rq_lock_irq(&rf);
1485 
1486 	schedstat_inc(rq->yld_count);
1487 	current->sched_class->yield_task(rq);
1488 
1489 	preempt_disable();
1490 	rq_unlock_irq(rq, &rf);
1491 	sched_preempt_enable_no_resched();
1492 
1493 	schedule();
1494 }
1495 
1496 /**
1497  * sys_sched_yield - yield the current processor to other threads.
1498  *
1499  * This function yields the current CPU to other tasks. If there are no
1500  * other threads running on this CPU then this function will return.
1501  *
1502  * Return: 0.
1503  */
1504 SYSCALL_DEFINE0(sched_yield)
1505 {
1506 	do_sched_yield();
1507 	return 0;
1508 }
1509 
1510 /**
1511  * yield - yield the current processor to other threads.
1512  *
1513  * Do not ever use this function, there's a 99% chance you're doing it wrong.
1514  *
1515  * The scheduler is at all times free to pick the calling task as the most
1516  * eligible task to run; if removing the yield() call from your code breaks
1517  * it, it's already broken.
1518  *
1519  * Typical broken usage is:
1520  *
1521  * while (!event)
1522  *	yield();
1523  *
1524  * where one assumes that yield() will let 'the other' process run that will
1525  * make event true. If the current task is a SCHED_FIFO task that will never
1526  * happen. Never use yield() as a progress guarantee!!
1527  *
1528  * If you want to use yield() to wait for something, use wait_event().
1529  * If you want to use yield() to be 'nice' for others, use cond_resched().
1530  * If you still want to use yield(), do not!
1531  */
1532 void __sched yield(void)
1533 {
1534 	set_current_state(TASK_RUNNING);
1535 	do_sched_yield();
1536 }
1537 EXPORT_SYMBOL(yield);
1538 
1539 /**
1540  * yield_to - yield the current processor to another thread in
1541  * your thread group, or accelerate that thread toward the
1542  * processor it's on.
1543  * @p: target task
1544  * @preempt: whether task preemption is allowed or not
1545  *
1546  * It's the caller's job to ensure that the target task struct
1547  * can't go away on us before we can do any checks.
1548  *
1549  * Return:
1550  *	true (>0) if we indeed boosted the target task.
1551  *	false (0) if we failed to boost the target.
1552  *	-ESRCH if there's no task to yield to.
1553  */
1554 int __sched yield_to(struct task_struct *p, bool preempt)
1555 {
1556 	struct task_struct *curr = current;
1557 	struct rq *rq, *p_rq;
1558 	int yielded = 0;
1559 
1560 	scoped_guard (irqsave) {
1561 		rq = this_rq();
1562 
1563 again:
1564 		p_rq = task_rq(p);
1565 		/*
1566 		 * If we're the only runnable task on the rq and target rq also
1567 		 * has only one task, there's absolutely no point in yielding.
1568 		 */
1569 		if (rq->nr_running == 1 && p_rq->nr_running == 1)
1570 			return -ESRCH;
1571 
1572 		guard(double_rq_lock)(rq, p_rq);
1573 		if (task_rq(p) != p_rq)
1574 			goto again;
1575 
1576 		if (!curr->sched_class->yield_to_task)
1577 			return 0;
1578 
1579 		if (curr->sched_class != p->sched_class)
1580 			return 0;
1581 
1582 		if (task_on_cpu(p_rq, p) || !task_is_running(p))
1583 			return 0;
1584 
1585 		yielded = curr->sched_class->yield_to_task(rq, p);
1586 		if (yielded) {
1587 			schedstat_inc(rq->yld_count);
1588 			/*
1589 			 * Make p's CPU reschedule; pick_next_entity
1590 			 * takes care of fairness.
1591 			 */
1592 			if (preempt && rq != p_rq)
1593 				resched_curr(p_rq);
1594 		}
1595 	}
1596 
1597 	if (yielded)
1598 		schedule();
1599 
1600 	return yielded;
1601 }
1602 EXPORT_SYMBOL_GPL(yield_to);
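
/*
 * The typical in-tree use of yield_to() is paravirtual lock holder
 * boosting: KVM's kvm_vcpu_on_spin(), for example, uses it to get the
 * vCPU that holds a contended lock running again.
 */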
1603 
1604 /**
1605  * sys_sched_get_priority_max - return maximum RT priority.
1606  * @policy: scheduling class.
1607  *
1608  * Return: On success, this syscall returns the maximum
1609  * rt_priority that can be used by a given scheduling class.
1610  * On failure, a negative error code is returned.
1611  */
1612 SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
1613 {
1614 	int ret = -EINVAL;
1615 
1616 	switch (policy) {
1617 	case SCHED_FIFO:
1618 	case SCHED_RR:
1619 		ret = MAX_RT_PRIO-1;
1620 		break;
1621 	case SCHED_DEADLINE:
1622 	case SCHED_NORMAL:
1623 	case SCHED_BATCH:
1624 	case SCHED_IDLE:
1625 		ret = 0;
1626 		break;
1627 	}
1628 	return ret;
1629 }
1630 
1631 /**
1632  * sys_sched_get_priority_min - return minimum RT priority.
1633  * @policy: scheduling class.
1634  *
1635  * Return: On success, this syscall returns the minimum
1636  * rt_priority that can be used by a given scheduling class.
1637  * On failure, a negative error code is returned.
1638  */
1639 SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
1640 {
1641 	int ret = -EINVAL;
1642 
1643 	switch (policy) {
1644 	case SCHED_FIFO:
1645 	case SCHED_RR:
1646 		ret = 1;
1647 		break;
1648 	case SCHED_DEADLINE:
1649 	case SCHED_NORMAL:
1650 	case SCHED_BATCH:
1651 	case SCHED_IDLE:
1652 		ret = 0;
1653 	}
1654 	return ret;
1655 }
1656 
1657 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
1658 {
1659 	unsigned int time_slice = 0;
1660 	int retval;
1661 
1662 	if (pid < 0)
1663 		return -EINVAL;
1664 
1665 	scoped_guard (rcu) {
1666 		struct task_struct *p = find_process_by_pid(pid);
1667 		if (!p)
1668 			return -ESRCH;
1669 
1670 		retval = security_task_getscheduler(p);
1671 		if (retval)
1672 			return retval;
1673 
1674 		scoped_guard (task_rq_lock, p) {
1675 			struct rq *rq = scope.rq;
1676 			if (p->sched_class->get_rr_interval)
1677 				time_slice = p->sched_class->get_rr_interval(rq, p);
1678 		}
1679 	}
1680 
1681 	jiffies_to_timespec64(time_slice, t);
1682 	return 0;
1683 }
1684 
1685 /**
1686  * sys_sched_rr_get_interval - return the default time-slice of a process.
1687  * @pid: pid of the process.
1688  * @interval: userspace pointer to the time-slice value.
1689  *
1690  * This syscall writes the default time-slice value of a given process
1691  * into the user-space timespec buffer. A value of '0' means infinity.
1692  *
1693  * Return: On success, 0 and the time-slice is in @interval. Otherwise,
1694  * an error code.
1695  */
1696 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
1697 		struct __kernel_timespec __user *, interval)
1698 {
1699 	struct timespec64 t;
1700 	int retval = sched_rr_get_interval(pid, &t);
1701 
1702 	if (retval == 0)
1703 		retval = put_timespec64(&t, interval);
1704 
1705 	return retval;
1706 }
1707 
1708 #ifdef CONFIG_COMPAT_32BIT_TIME
1709 SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
1710 		struct old_timespec32 __user *, interval)
1711 {
1712 	struct timespec64 t;
1713 	int retval = sched_rr_get_interval(pid, &t);
1714 
1715 	if (retval == 0)
1716 		retval = put_old_timespec32(&t, interval);
1717 	return retval;
1718 }
1719 #endif
1720