xref: /linux/kernel/sys.c (revision 17cfcb68af3bc7d5e8ae08779b1853310a2949f3)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/kernel/sys.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  */
7 
8 #include <linux/export.h>
9 #include <linux/mm.h>
10 #include <linux/utsname.h>
11 #include <linux/mman.h>
12 #include <linux/reboot.h>
13 #include <linux/prctl.h>
14 #include <linux/highuid.h>
15 #include <linux/fs.h>
16 #include <linux/kmod.h>
17 #include <linux/perf_event.h>
18 #include <linux/resource.h>
19 #include <linux/kernel.h>
20 #include <linux/workqueue.h>
21 #include <linux/capability.h>
22 #include <linux/device.h>
23 #include <linux/key.h>
24 #include <linux/times.h>
25 #include <linux/posix-timers.h>
26 #include <linux/security.h>
27 #include <linux/dcookies.h>
28 #include <linux/suspend.h>
29 #include <linux/tty.h>
30 #include <linux/signal.h>
31 #include <linux/cn_proc.h>
32 #include <linux/getcpu.h>
33 #include <linux/task_io_accounting_ops.h>
34 #include <linux/seccomp.h>
35 #include <linux/cpu.h>
36 #include <linux/personality.h>
37 #include <linux/ptrace.h>
38 #include <linux/fs_struct.h>
39 #include <linux/file.h>
40 #include <linux/mount.h>
41 #include <linux/gfp.h>
42 #include <linux/syscore_ops.h>
43 #include <linux/version.h>
44 #include <linux/ctype.h>
45 
46 #include <linux/compat.h>
47 #include <linux/syscalls.h>
48 #include <linux/kprobes.h>
49 #include <linux/user_namespace.h>
50 #include <linux/binfmts.h>
51 
52 #include <linux/sched.h>
53 #include <linux/sched/autogroup.h>
54 #include <linux/sched/loadavg.h>
55 #include <linux/sched/stat.h>
56 #include <linux/sched/mm.h>
57 #include <linux/sched/coredump.h>
58 #include <linux/sched/task.h>
59 #include <linux/sched/cputime.h>
60 #include <linux/rcupdate.h>
61 #include <linux/uidgid.h>
62 #include <linux/cred.h>
63 
64 #include <linux/nospec.h>
65 
66 #include <linux/kmsg_dump.h>
67 /* Move somewhere else to avoid recompiling? */
68 #include <generated/utsrelease.h>
69 
70 #include <linux/uaccess.h>
71 #include <asm/io.h>
72 #include <asm/unistd.h>
73 
74 #include "uid16.h"
75 
76 #ifndef SET_UNALIGN_CTL
77 # define SET_UNALIGN_CTL(a, b)	(-EINVAL)
78 #endif
79 #ifndef GET_UNALIGN_CTL
80 # define GET_UNALIGN_CTL(a, b)	(-EINVAL)
81 #endif
82 #ifndef SET_FPEMU_CTL
83 # define SET_FPEMU_CTL(a, b)	(-EINVAL)
84 #endif
85 #ifndef GET_FPEMU_CTL
86 # define GET_FPEMU_CTL(a, b)	(-EINVAL)
87 #endif
88 #ifndef SET_FPEXC_CTL
89 # define SET_FPEXC_CTL(a, b)	(-EINVAL)
90 #endif
91 #ifndef GET_FPEXC_CTL
92 # define GET_FPEXC_CTL(a, b)	(-EINVAL)
93 #endif
94 #ifndef GET_ENDIAN
95 # define GET_ENDIAN(a, b)	(-EINVAL)
96 #endif
97 #ifndef SET_ENDIAN
98 # define SET_ENDIAN(a, b)	(-EINVAL)
99 #endif
100 #ifndef GET_TSC_CTL
101 # define GET_TSC_CTL(a)		(-EINVAL)
102 #endif
103 #ifndef SET_TSC_CTL
104 # define SET_TSC_CTL(a)		(-EINVAL)
105 #endif
106 #ifndef GET_FP_MODE
107 # define GET_FP_MODE(a)		(-EINVAL)
108 #endif
109 #ifndef SET_FP_MODE
110 # define SET_FP_MODE(a, b)	(-EINVAL)
111 #endif
112 #ifndef SVE_SET_VL
113 # define SVE_SET_VL(a)		(-EINVAL)
114 #endif
115 #ifndef SVE_GET_VL
116 # define SVE_GET_VL()		(-EINVAL)
117 #endif
118 #ifndef PAC_RESET_KEYS
119 # define PAC_RESET_KEYS(a, b)	(-EINVAL)
120 #endif
121 #ifndef SET_TAGGED_ADDR_CTRL
122 # define SET_TAGGED_ADDR_CTRL(a)	(-EINVAL)
123 #endif
124 #ifndef GET_TAGGED_ADDR_CTRL
125 # define GET_TAGGED_ADDR_CTRL()		(-EINVAL)
126 #endif
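
/*
 * [Editor's note -- illustrative, not in the original file]  Each macro
 * above is only a fallback: an architecture that supports the matching
 * prctl() operation overrides it from its own headers before this file
 * is compiled.  A rough sketch of such an override (the exact location
 * and helper names here are assumptions):
 *
 *	// in arch/<arch>/include/asm/processor.h
 *	#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
 *	#define SET_TSC_CTL(val)	set_tsc_mode((val))
 *
 * Anything left undefined simply returns -EINVAL below.
 */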
127 
128 /*
129  * This is where the system-wide overflow UID and GID are defined, for
130  * architectures that now have 32-bit UID/GID but didn't in the past.
131  */
132 
133 int overflowuid = DEFAULT_OVERFLOWUID;
134 int overflowgid = DEFAULT_OVERFLOWGID;
135 
136 EXPORT_SYMBOL(overflowuid);
137 EXPORT_SYMBOL(overflowgid);
138 
139 /*
140  * The same as above, but for filesystems which can only store a 16-bit
141  * UID and GID. As such, this is needed on all architectures.
142  */
143 
144 int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
145 int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
146 
147 EXPORT_SYMBOL(fs_overflowuid);
148 EXPORT_SYMBOL(fs_overflowgid);
149 
150 /*
151  * Returns true if current's euid is the same as p's uid or euid,
152  * or if current has CAP_SYS_NICE in p's user_ns.
153  *
154  * Called with rcu_read_lock held, so the creds are safe.
155  */
156 static bool set_one_prio_perm(struct task_struct *p)
157 {
158 	const struct cred *cred = current_cred(), *pcred = __task_cred(p);
159 
160 	if (uid_eq(pcred->uid,  cred->euid) ||
161 	    uid_eq(pcred->euid, cred->euid))
162 		return true;
163 	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
164 		return true;
165 	return false;
166 }
167 
168 /*
169  * set the priority of a task
170  * - the caller must hold the RCU read lock
171  */
172 static int set_one_prio(struct task_struct *p, int niceval, int error)
173 {
174 	int no_nice;
175 
176 	if (!set_one_prio_perm(p)) {
177 		error = -EPERM;
178 		goto out;
179 	}
180 	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
181 		error = -EACCES;
182 		goto out;
183 	}
184 	no_nice = security_task_setnice(p, niceval);
185 	if (no_nice) {
186 		error = no_nice;
187 		goto out;
188 	}
189 	if (error == -ESRCH)
190 		error = 0;
191 	set_user_nice(p, niceval);
192 out:
193 	return error;
194 }
195 
196 SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
197 {
198 	struct task_struct *g, *p;
199 	struct user_struct *user;
200 	const struct cred *cred = current_cred();
201 	int error = -EINVAL;
202 	struct pid *pgrp;
203 	kuid_t uid;
204 
205 	if (which > PRIO_USER || which < PRIO_PROCESS)
206 		goto out;
207 
208 	/* normalize: avoid signed division (rounding problems) */
209 	error = -ESRCH;
210 	if (niceval < MIN_NICE)
211 		niceval = MIN_NICE;
212 	if (niceval > MAX_NICE)
213 		niceval = MAX_NICE;
214 
215 	rcu_read_lock();
216 	read_lock(&tasklist_lock);
217 	switch (which) {
218 	case PRIO_PROCESS:
219 		if (who)
220 			p = find_task_by_vpid(who);
221 		else
222 			p = current;
223 		if (p)
224 			error = set_one_prio(p, niceval, error);
225 		break;
226 	case PRIO_PGRP:
227 		if (who)
228 			pgrp = find_vpid(who);
229 		else
230 			pgrp = task_pgrp(current);
231 		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
232 			error = set_one_prio(p, niceval, error);
233 		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
234 		break;
235 	case PRIO_USER:
236 		uid = make_kuid(cred->user_ns, who);
237 		user = cred->user;
238 		if (!who)
239 			uid = cred->uid;
240 		else if (!uid_eq(uid, cred->uid)) {
241 			user = find_user(uid);
242 			if (!user)
243 				goto out_unlock;	/* No processes for this user */
244 		}
245 		do_each_thread(g, p) {
246 			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
247 				error = set_one_prio(p, niceval, error);
248 		} while_each_thread(g, p);
249 		if (!uid_eq(uid, cred->uid))
250 			free_uid(user);		/* For find_user() */
251 		break;
252 	}
253 out_unlock:
254 	read_unlock(&tasklist_lock);
255 	rcu_read_unlock();
256 out:
257 	return error;
258 }
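
/*
 * [Editor's note -- illustrative userspace sketch, not in the original
 * file]  Because niceval is clamped to [MIN_NICE, MAX_NICE] above, an
 * out-of-range request does not fail, it just saturates:
 *
 *	#include <sys/resource.h>
 *
 *	setpriority(PRIO_PROCESS, 0, 100);	// succeeds; acts like 19
 *	setpriority(PRIO_PROCESS, 0, -99);	// fails with EACCES unless
 *						// the caller may raise prio
 */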
259 
260 /*
261  * Ugh. To avoid negative return values, "getpriority()" will
262  * not return the normal nice-value, but a negated value that
263  * has been offset by 20 (i.e. it returns 40..1 instead of -20..19)
264  * to stay compatible.
265  */
266 SYSCALL_DEFINE2(getpriority, int, which, int, who)
267 {
268 	struct task_struct *g, *p;
269 	struct user_struct *user;
270 	const struct cred *cred = current_cred();
271 	long niceval, retval = -ESRCH;
272 	struct pid *pgrp;
273 	kuid_t uid;
274 
275 	if (which > PRIO_USER || which < PRIO_PROCESS)
276 		return -EINVAL;
277 
278 	rcu_read_lock();
279 	read_lock(&tasklist_lock);
280 	switch (which) {
281 	case PRIO_PROCESS:
282 		if (who)
283 			p = find_task_by_vpid(who);
284 		else
285 			p = current;
286 		if (p) {
287 			niceval = nice_to_rlimit(task_nice(p));
288 			if (niceval > retval)
289 				retval = niceval;
290 		}
291 		break;
292 	case PRIO_PGRP:
293 		if (who)
294 			pgrp = find_vpid(who);
295 		else
296 			pgrp = task_pgrp(current);
297 		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
298 			niceval = nice_to_rlimit(task_nice(p));
299 			if (niceval > retval)
300 				retval = niceval;
301 		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
302 		break;
303 	case PRIO_USER:
304 		uid = make_kuid(cred->user_ns, who);
305 		user = cred->user;
306 		if (!who)
307 			uid = cred->uid;
308 		else if (!uid_eq(uid, cred->uid)) {
309 			user = find_user(uid);
310 			if (!user)
311 				goto out_unlock;	/* No processes for this user */
312 		}
313 		do_each_thread(g, p) {
314 			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
315 				niceval = nice_to_rlimit(task_nice(p));
316 				if (niceval > retval)
317 					retval = niceval;
318 			}
319 		} while_each_thread(g, p);
320 		if (!uid_eq(uid, cred->uid))
321 			free_uid(user);		/* for find_user() */
322 		break;
323 	}
324 out_unlock:
325 	read_unlock(&tasklist_lock);
326 	rcu_read_unlock();
327 
328 	return retval;
329 }
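
/*
 * [Editor's note -- illustrative sketch, not in the original file]  The
 * raw syscall therefore encodes nice value N as 20 - N (40 for -20, 1
 * for +19); a libc wrapper typically converts it back:
 *
 *	long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	int nice = 20 - (int)raw;	// the familiar -20..19 range
 */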
330 
331 /*
332  * Unprivileged users may change the real gid to the effective gid
333  * or vice versa.  (BSD-style)
334  *
335  * If you set the real gid at all, or set the effective gid to a value not
336  * equal to the real gid, then the saved gid is set to the new effective gid.
337  *
338  * This makes it possible for a setgid program to completely drop its
339  * privileges, which is often a useful assertion to make when you are doing
340  * a security audit of a program.
341  *
342  * The general idea is that a program which uses just setregid() will be
343  * 100% compatible with BSD.  A program which uses just setgid() will be
344  * 100% compatible with POSIX with saved IDs.
345  *
346  * SMP: There are no races; the GIDs are checked only by filesystem
347  *      operations (as far as semantic preservation is concerned).
348  */
349 #ifdef CONFIG_MULTIUSER
350 long __sys_setregid(gid_t rgid, gid_t egid)
351 {
352 	struct user_namespace *ns = current_user_ns();
353 	const struct cred *old;
354 	struct cred *new;
355 	int retval;
356 	kgid_t krgid, kegid;
357 
358 	krgid = make_kgid(ns, rgid);
359 	kegid = make_kgid(ns, egid);
360 
361 	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
362 		return -EINVAL;
363 	if ((egid != (gid_t) -1) && !gid_valid(kegid))
364 		return -EINVAL;
365 
366 	new = prepare_creds();
367 	if (!new)
368 		return -ENOMEM;
369 	old = current_cred();
370 
371 	retval = -EPERM;
372 	if (rgid != (gid_t) -1) {
373 		if (gid_eq(old->gid, krgid) ||
374 		    gid_eq(old->egid, krgid) ||
375 		    ns_capable(old->user_ns, CAP_SETGID))
376 			new->gid = krgid;
377 		else
378 			goto error;
379 	}
380 	if (egid != (gid_t) -1) {
381 		if (gid_eq(old->gid, kegid) ||
382 		    gid_eq(old->egid, kegid) ||
383 		    gid_eq(old->sgid, kegid) ||
384 		    ns_capable(old->user_ns, CAP_SETGID))
385 			new->egid = kegid;
386 		else
387 			goto error;
388 	}
389 
390 	if (rgid != (gid_t) -1 ||
391 	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
392 		new->sgid = new->egid;
393 	new->fsgid = new->egid;
394 
395 	return commit_creds(new);
396 
397 error:
398 	abort_creds(new);
399 	return retval;
400 }
401 
402 SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
403 {
404 	return __sys_setregid(rgid, egid);
405 }
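
/*
 * [Editor's note -- illustrative userspace sketch, not in the original
 * file]  Per the comment above __sys_setregid(), a setgid program can
 * drop its elevated group permanently, since setting the real gid also
 * rewrites the saved gid:
 *
 *	gid_t rgid = getgid();
 *
 *	if (setregid(rgid, rgid) < 0)	// real == effective == saved
 *		abort();
 */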
406 
407 /*
408  * setgid() is implemented like SysV w/ SAVED_IDS
409  *
410  * SMP: Same implicit races as above.
411  */
412 long __sys_setgid(gid_t gid)
413 {
414 	struct user_namespace *ns = current_user_ns();
415 	const struct cred *old;
416 	struct cred *new;
417 	int retval;
418 	kgid_t kgid;
419 
420 	kgid = make_kgid(ns, gid);
421 	if (!gid_valid(kgid))
422 		return -EINVAL;
423 
424 	new = prepare_creds();
425 	if (!new)
426 		return -ENOMEM;
427 	old = current_cred();
428 
429 	retval = -EPERM;
430 	if (ns_capable(old->user_ns, CAP_SETGID))
431 		new->gid = new->egid = new->sgid = new->fsgid = kgid;
432 	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
433 		new->egid = new->fsgid = kgid;
434 	else
435 		goto error;
436 
437 	return commit_creds(new);
438 
439 error:
440 	abort_creds(new);
441 	return retval;
442 }
443 
444 SYSCALL_DEFINE1(setgid, gid_t, gid)
445 {
446 	return __sys_setgid(gid);
447 }
448 
449 /*
450  * change the user struct in a credentials set to match the new UID
451  */
452 static int set_user(struct cred *new)
453 {
454 	struct user_struct *new_user;
455 
456 	new_user = alloc_uid(new->uid);
457 	if (!new_user)
458 		return -EAGAIN;
459 
460 	/*
461 	 * We don't fail in case of NPROC limit excess here because too many
462 	 * poorly written programs don't check set*uid() return code, assuming
463 	 * it never fails if called by root.  We may still enforce NPROC limit
464 	 * for programs doing set*uid()+execve() by harmlessly deferring the
465 	 * failure to the execve() stage.
466 	 */
467 	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
468 			new_user != INIT_USER)
469 		current->flags |= PF_NPROC_EXCEEDED;
470 	else
471 		current->flags &= ~PF_NPROC_EXCEEDED;
472 
473 	free_uid(new->user);
474 	new->user = new_user;
475 	return 0;
476 }
477 
478 /*
479  * Unprivileged users may change the real uid to the effective uid
480  * or vice versa.  (BSD-style)
481  *
482  * If you set the real uid at all, or set the effective uid to a value not
483  * equal to the real uid, then the saved uid is set to the new effective uid.
484  *
485  * This makes it possible for a setuid program to completely drop its
486  * privileges, which is often a useful assertion to make when you are doing
487  * a security audit of a program.
488  *
489  * The general idea is that a program which uses just setreuid() will be
490  * 100% compatible with BSD.  A program which uses just setuid() will be
491  * 100% compatible with POSIX with saved IDs.
492  */
493 long __sys_setreuid(uid_t ruid, uid_t euid)
494 {
495 	struct user_namespace *ns = current_user_ns();
496 	const struct cred *old;
497 	struct cred *new;
498 	int retval;
499 	kuid_t kruid, keuid;
500 
501 	kruid = make_kuid(ns, ruid);
502 	keuid = make_kuid(ns, euid);
503 
504 	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
505 		return -EINVAL;
506 	if ((euid != (uid_t) -1) && !uid_valid(keuid))
507 		return -EINVAL;
508 
509 	new = prepare_creds();
510 	if (!new)
511 		return -ENOMEM;
512 	old = current_cred();
513 
514 	retval = -EPERM;
515 	if (ruid != (uid_t) -1) {
516 		new->uid = kruid;
517 		if (!uid_eq(old->uid, kruid) &&
518 		    !uid_eq(old->euid, kruid) &&
519 		    !ns_capable_setid(old->user_ns, CAP_SETUID))
520 			goto error;
521 	}
522 
523 	if (euid != (uid_t) -1) {
524 		new->euid = keuid;
525 		if (!uid_eq(old->uid, keuid) &&
526 		    !uid_eq(old->euid, keuid) &&
527 		    !uid_eq(old->suid, keuid) &&
528 		    !ns_capable_setid(old->user_ns, CAP_SETUID))
529 			goto error;
530 	}
531 
532 	if (!uid_eq(new->uid, old->uid)) {
533 		retval = set_user(new);
534 		if (retval < 0)
535 			goto error;
536 	}
537 	if (ruid != (uid_t) -1 ||
538 	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
539 		new->suid = new->euid;
540 	new->fsuid = new->euid;
541 
542 	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
543 	if (retval < 0)
544 		goto error;
545 
546 	return commit_creds(new);
547 
548 error:
549 	abort_creds(new);
550 	return retval;
551 }
552 
553 SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
554 {
555 	return __sys_setreuid(ruid, euid);
556 }
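
/*
 * [Editor's note -- illustrative userspace sketch, not in the original
 * file]  The BSD-style "swap" alluded to in the comments: a setuid
 * program can drop privilege temporarily and regain it later, because
 * each call keeps the other id available to swap back to:
 *
 *	uid_t ruid = getuid(), euid = geteuid();
 *
 *	setreuid(euid, ruid);	// drop: run with the real user's euid
 *	...unprivileged work...
 *	setreuid(ruid, euid);	// regain the original effective uid
 */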
557 
558 /*
559  * setuid() is implemented like SysV with SAVED_IDS
560  *
561  * Note that SAVED_IDS is deficient in that a setuid root program
562  * like sendmail, for example, cannot set its uid to be a normal
563  * user and then switch back, because if you're root, setuid() sets
564  * the saved uid too.  If you don't like this, blame the bright people
565  * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
566  * will allow a root program to temporarily drop privileges and be able to
567  * regain them by swapping the real and effective uid.
568  */
569 long __sys_setuid(uid_t uid)
570 {
571 	struct user_namespace *ns = current_user_ns();
572 	const struct cred *old;
573 	struct cred *new;
574 	int retval;
575 	kuid_t kuid;
576 
577 	kuid = make_kuid(ns, uid);
578 	if (!uid_valid(kuid))
579 		return -EINVAL;
580 
581 	new = prepare_creds();
582 	if (!new)
583 		return -ENOMEM;
584 	old = current_cred();
585 
586 	retval = -EPERM;
587 	if (ns_capable_setid(old->user_ns, CAP_SETUID)) {
588 		new->suid = new->uid = kuid;
589 		if (!uid_eq(kuid, old->uid)) {
590 			retval = set_user(new);
591 			if (retval < 0)
592 				goto error;
593 		}
594 	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
595 		goto error;
596 	}
597 
598 	new->fsuid = new->euid = kuid;
599 
600 	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
601 	if (retval < 0)
602 		goto error;
603 
604 	return commit_creds(new);
605 
606 error:
607 	abort_creds(new);
608 	return retval;
609 }
610 
611 SYSCALL_DEFINE1(setuid, uid_t, uid)
612 {
613 	return __sys_setuid(uid);
614 }
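
/*
 * [Editor's note -- illustrative userspace sketch, not in the original
 * file]  The SAVED_IDS caveat from the comment above in practice: when
 * a CAP_SETUID caller (e.g. root) uses setuid(), all three ids change,
 * so the drop is irreversible:
 *
 *	setuid(getuid());	// ruid == euid == suid == real user
 *	setuid(0);		// now fails with EPERM
 */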
615 
616 
617 /*
618  * This function implements a generic ability to update ruid, euid,
619  * and suid.  This allows you to implement the 4.4 compatible seteuid().
620  */
621 long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
622 {
623 	struct user_namespace *ns = current_user_ns();
624 	const struct cred *old;
625 	struct cred *new;
626 	int retval;
627 	kuid_t kruid, keuid, ksuid;
628 
629 	kruid = make_kuid(ns, ruid);
630 	keuid = make_kuid(ns, euid);
631 	ksuid = make_kuid(ns, suid);
632 
633 	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
634 		return -EINVAL;
635 
636 	if ((euid != (uid_t) -1) && !uid_valid(keuid))
637 		return -EINVAL;
638 
639 	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
640 		return -EINVAL;
641 
642 	new = prepare_creds();
643 	if (!new)
644 		return -ENOMEM;
645 
646 	old = current_cred();
647 
648 	retval = -EPERM;
649 	if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
650 		if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
651 		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
652 			goto error;
653 		if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
654 		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
655 			goto error;
656 		if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
657 		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
658 			goto error;
659 	}
660 
661 	if (ruid != (uid_t) -1) {
662 		new->uid = kruid;
663 		if (!uid_eq(kruid, old->uid)) {
664 			retval = set_user(new);
665 			if (retval < 0)
666 				goto error;
667 		}
668 	}
669 	if (euid != (uid_t) -1)
670 		new->euid = keuid;
671 	if (suid != (uid_t) -1)
672 		new->suid = ksuid;
673 	new->fsuid = new->euid;
674 
675 	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
676 	if (retval < 0)
677 		goto error;
678 
679 	return commit_creds(new);
680 
681 error:
682 	abort_creds(new);
683 	return retval;
684 }
685 
686 SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
687 {
688 	return __sys_setresuid(ruid, euid, suid);
689 }
690 
691 SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
692 {
693 	const struct cred *cred = current_cred();
694 	int retval;
695 	uid_t ruid, euid, suid;
696 
697 	ruid = from_kuid_munged(cred->user_ns, cred->uid);
698 	euid = from_kuid_munged(cred->user_ns, cred->euid);
699 	suid = from_kuid_munged(cred->user_ns, cred->suid);
700 
701 	retval = put_user(ruid, ruidp);
702 	if (!retval) {
703 		retval = put_user(euid, euidp);
704 		if (!retval)
705 			return put_user(suid, suidp);
706 	}
707 	return retval;
708 }
709 
710 /*
711  * Same as above, but for rgid, egid, sgid.
712  */
713 long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
714 {
715 	struct user_namespace *ns = current_user_ns();
716 	const struct cred *old;
717 	struct cred *new;
718 	int retval;
719 	kgid_t krgid, kegid, ksgid;
720 
721 	krgid = make_kgid(ns, rgid);
722 	kegid = make_kgid(ns, egid);
723 	ksgid = make_kgid(ns, sgid);
724 
725 	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
726 		return -EINVAL;
727 	if ((egid != (gid_t) -1) && !gid_valid(kegid))
728 		return -EINVAL;
729 	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
730 		return -EINVAL;
731 
732 	new = prepare_creds();
733 	if (!new)
734 		return -ENOMEM;
735 	old = current_cred();
736 
737 	retval = -EPERM;
738 	if (!ns_capable(old->user_ns, CAP_SETGID)) {
739 		if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
740 		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
741 			goto error;
742 		if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
743 		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
744 			goto error;
745 		if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
746 		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
747 			goto error;
748 	}
749 
750 	if (rgid != (gid_t) -1)
751 		new->gid = krgid;
752 	if (egid != (gid_t) -1)
753 		new->egid = kegid;
754 	if (sgid != (gid_t) -1)
755 		new->sgid = ksgid;
756 	new->fsgid = new->egid;
757 
758 	return commit_creds(new);
759 
760 error:
761 	abort_creds(new);
762 	return retval;
763 }
764 
765 SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
766 {
767 	return __sys_setresgid(rgid, egid, sgid);
768 }
769 
770 SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
771 {
772 	const struct cred *cred = current_cred();
773 	int retval;
774 	gid_t rgid, egid, sgid;
775 
776 	rgid = from_kgid_munged(cred->user_ns, cred->gid);
777 	egid = from_kgid_munged(cred->user_ns, cred->egid);
778 	sgid = from_kgid_munged(cred->user_ns, cred->sgid);
779 
780 	retval = put_user(rgid, rgidp);
781 	if (!retval) {
782 		retval = put_user(egid, egidp);
783 		if (!retval)
784 			retval = put_user(sgid, sgidp);
785 	}
786 
787 	return retval;
788 }
789 
790 
791 /*
792  * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
793  * is used for "access()" and for the NFS daemon (letting nfsd stay at
794  * whatever uid it wants to). It normally shadows "euid", except when
795  * explicitly set by setfsuid() or for access().
796  */
797 long __sys_setfsuid(uid_t uid)
798 {
799 	const struct cred *old;
800 	struct cred *new;
801 	uid_t old_fsuid;
802 	kuid_t kuid;
803 
804 	old = current_cred();
805 	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
806 
807 	kuid = make_kuid(old->user_ns, uid);
808 	if (!uid_valid(kuid))
809 		return old_fsuid;
810 
811 	new = prepare_creds();
812 	if (!new)
813 		return old_fsuid;
814 
815 	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid)  ||
816 	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
817 	    ns_capable_setid(old->user_ns, CAP_SETUID)) {
818 		if (!uid_eq(kuid, old->fsuid)) {
819 			new->fsuid = kuid;
820 			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
821 				goto change_okay;
822 		}
823 	}
824 
825 	abort_creds(new);
826 	return old_fsuid;
827 
828 change_okay:
829 	commit_creds(new);
830 	return old_fsuid;
831 }
832 
833 SYSCALL_DEFINE1(setfsuid, uid_t, uid)
834 {
835 	return __sys_setfsuid(uid);
836 }
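
/*
 * [Editor's note -- illustrative userspace sketch, not in the original
 * file]  Note the unusual contract above: setfsuid() always returns the
 * previous fsuid and never reports an error, so callers probe with an
 * invalid uid to see whether the change actually happened:
 *
 *	setfsuid(uid);
 *	if ((uid_t)setfsuid(-1) != uid)	// -1 is invalid: read-only probe
 *		...the change was refused...
 */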
837 
838 /*
839  * The same, in Swedish ("Samma på svenska"): setfsgid() mirrors setfsuid().
840  */
841 long __sys_setfsgid(gid_t gid)
842 {
843 	const struct cred *old;
844 	struct cred *new;
845 	gid_t old_fsgid;
846 	kgid_t kgid;
847 
848 	old = current_cred();
849 	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
850 
851 	kgid = make_kgid(old->user_ns, gid);
852 	if (!gid_valid(kgid))
853 		return old_fsgid;
854 
855 	new = prepare_creds();
856 	if (!new)
857 		return old_fsgid;
858 
859 	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
860 	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
861 	    ns_capable(old->user_ns, CAP_SETGID)) {
862 		if (!gid_eq(kgid, old->fsgid)) {
863 			new->fsgid = kgid;
864 			goto change_okay;
865 		}
866 	}
867 
868 	abort_creds(new);
869 	return old_fsgid;
870 
871 change_okay:
872 	commit_creds(new);
873 	return old_fsgid;
874 }
875 
876 SYSCALL_DEFINE1(setfsgid, gid_t, gid)
877 {
878 	return __sys_setfsgid(gid);
879 }
880 #endif /* CONFIG_MULTIUSER */
881 
882 /**
883  * sys_getpid - return the thread group id of the current process
884  *
885  * Note, despite the name, this returns the tgid not the pid.  The tgid and
886  * the pid are identical unless CLONE_THREAD was specified on clone(), in
887  * which case the tgid is the same in all threads of the same group.
888  *
889  * This is SMP safe as current->tgid does not change.
890  */
891 SYSCALL_DEFINE0(getpid)
892 {
893 	return task_tgid_vnr(current);
894 }
895 
896 /* Thread ID - the internal kernel "pid" */
897 SYSCALL_DEFINE0(gettid)
898 {
899 	return task_pid_vnr(current);
900 }
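
/*
 * [Editor's note -- illustrative note, not in the original file]  In a
 * multithreaded process every thread sees the same getpid() (the tgid)
 * but its own gettid(); only the group leader has the two equal:
 *
 *	pid_t tgid = getpid();
 *	pid_t tid  = syscall(SYS_gettid);	// raw call; glibc gained a
 *						// wrapper only in 2.30
 */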
901 
902 /*
903  * Accessing ->real_parent is not SMP-safe, it could
904  * change from under us. However, we can use a stale
905  * value of ->real_parent under rcu_read_lock(), see
906  * release_task()->call_rcu(delayed_put_task_struct).
907  */
908 SYSCALL_DEFINE0(getppid)
909 {
910 	int pid;
911 
912 	rcu_read_lock();
913 	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
914 	rcu_read_unlock();
915 
916 	return pid;
917 }
918 
919 SYSCALL_DEFINE0(getuid)
920 {
921 	/* Only we change this so SMP safe */
922 	return from_kuid_munged(current_user_ns(), current_uid());
923 }
924 
925 SYSCALL_DEFINE0(geteuid)
926 {
927 	/* Only we change this so SMP safe */
928 	return from_kuid_munged(current_user_ns(), current_euid());
929 }
930 
931 SYSCALL_DEFINE0(getgid)
932 {
933 	/* Only we change this so SMP safe */
934 	return from_kgid_munged(current_user_ns(), current_gid());
935 }
936 
937 SYSCALL_DEFINE0(getegid)
938 {
939 	/* Only we change this so SMP safe */
940 	return from_kgid_munged(current_user_ns(), current_egid());
941 }
942 
943 static void do_sys_times(struct tms *tms)
944 {
945 	u64 tgutime, tgstime, cutime, cstime;
946 
947 	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
948 	cutime = current->signal->cutime;
949 	cstime = current->signal->cstime;
950 	tms->tms_utime = nsec_to_clock_t(tgutime);
951 	tms->tms_stime = nsec_to_clock_t(tgstime);
952 	tms->tms_cutime = nsec_to_clock_t(cutime);
953 	tms->tms_cstime = nsec_to_clock_t(cstime);
954 }
955 
956 SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
957 {
958 	if (tbuf) {
959 		struct tms tmp;
960 
961 		do_sys_times(&tmp);
962 		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
963 			return -EFAULT;
964 	}
965 	force_successful_syscall_return();
966 	return (long) jiffies_64_to_clock_t(get_jiffies_64());
967 }
968 
969 #ifdef CONFIG_COMPAT
970 static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
971 {
972 	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
973 }
974 
975 COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
976 {
977 	if (tbuf) {
978 		struct tms tms;
979 		struct compat_tms tmp;
980 
981 		do_sys_times(&tms);
982 		/* Convert our struct tms to the compat version. */
983 		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
984 		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
985 		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
986 		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
987 		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
988 			return -EFAULT;
989 	}
990 	force_successful_syscall_return();
991 	return compat_jiffies_to_clock_t(jiffies);
992 }
993 #endif
994 
995 /*
996  * This needs some heavy checking ...
997  * I just haven't the stomach for it. I also don't fully
998  * understand sessions/pgrp etc. Let somebody who does explain it.
999  *
1000  * OK, I think I have the protection semantics right.... this is really
1001  * only important on a multi-user system anyway, to make sure one user
1002  * can't send a signal to a process owned by another.  -TYT, 12/12/91
1003  *
1004  * !PF_FORKNOEXEC check to conform completely to POSIX.
1005  */
1006 SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
1007 {
1008 	struct task_struct *p;
1009 	struct task_struct *group_leader = current->group_leader;
1010 	struct pid *pgrp;
1011 	int err;
1012 
1013 	if (!pid)
1014 		pid = task_pid_vnr(group_leader);
1015 	if (!pgid)
1016 		pgid = pid;
1017 	if (pgid < 0)
1018 		return -EINVAL;
1019 	rcu_read_lock();
1020 
1021 	/* From this point forward we keep holding onto the tasklist lock
1022 	 * so that our parent does not change from under us. -DaveM
1023 	 */
1024 	write_lock_irq(&tasklist_lock);
1025 
1026 	err = -ESRCH;
1027 	p = find_task_by_vpid(pid);
1028 	if (!p)
1029 		goto out;
1030 
1031 	err = -EINVAL;
1032 	if (!thread_group_leader(p))
1033 		goto out;
1034 
1035 	if (same_thread_group(p->real_parent, group_leader)) {
1036 		err = -EPERM;
1037 		if (task_session(p) != task_session(group_leader))
1038 			goto out;
1039 		err = -EACCES;
1040 		if (!(p->flags & PF_FORKNOEXEC))
1041 			goto out;
1042 	} else {
1043 		err = -ESRCH;
1044 		if (p != group_leader)
1045 			goto out;
1046 	}
1047 
1048 	err = -EPERM;
1049 	if (p->signal->leader)
1050 		goto out;
1051 
1052 	pgrp = task_pid(p);
1053 	if (pgid != pid) {
1054 		struct task_struct *g;
1055 
1056 		pgrp = find_vpid(pgid);
1057 		g = pid_task(pgrp, PIDTYPE_PGID);
1058 		if (!g || task_session(g) != task_session(group_leader))
1059 			goto out;
1060 	}
1061 
1062 	err = security_task_setpgid(p, pgid);
1063 	if (err)
1064 		goto out;
1065 
1066 	if (task_pgrp(p) != pgrp)
1067 		change_pid(p, PIDTYPE_PGID, pgrp);
1068 
1069 	err = 0;
1070 out:
1071 	/* All paths lead to here, thus we are safe. -DaveM */
1072 	write_unlock_irq(&tasklist_lock);
1073 	rcu_read_unlock();
1074 	return err;
1075 }
1076 
1077 static int do_getpgid(pid_t pid)
1078 {
1079 	struct task_struct *p;
1080 	struct pid *grp;
1081 	int retval;
1082 
1083 	rcu_read_lock();
1084 	if (!pid)
1085 		grp = task_pgrp(current);
1086 	else {
1087 		retval = -ESRCH;
1088 		p = find_task_by_vpid(pid);
1089 		if (!p)
1090 			goto out;
1091 		grp = task_pgrp(p);
1092 		if (!grp)
1093 			goto out;
1094 
1095 		retval = security_task_getpgid(p);
1096 		if (retval)
1097 			goto out;
1098 	}
1099 	retval = pid_vnr(grp);
1100 out:
1101 	rcu_read_unlock();
1102 	return retval;
1103 }
1104 
1105 SYSCALL_DEFINE1(getpgid, pid_t, pid)
1106 {
1107 	return do_getpgid(pid);
1108 }
1109 
1110 #ifdef __ARCH_WANT_SYS_GETPGRP
1111 
1112 SYSCALL_DEFINE0(getpgrp)
1113 {
1114 	return do_getpgid(0);
1115 }
1116 
1117 #endif
1118 
1119 SYSCALL_DEFINE1(getsid, pid_t, pid)
1120 {
1121 	struct task_struct *p;
1122 	struct pid *sid;
1123 	int retval;
1124 
1125 	rcu_read_lock();
1126 	if (!pid)
1127 		sid = task_session(current);
1128 	else {
1129 		retval = -ESRCH;
1130 		p = find_task_by_vpid(pid);
1131 		if (!p)
1132 			goto out;
1133 		sid = task_session(p);
1134 		if (!sid)
1135 			goto out;
1136 
1137 		retval = security_task_getsid(p);
1138 		if (retval)
1139 			goto out;
1140 	}
1141 	retval = pid_vnr(sid);
1142 out:
1143 	rcu_read_unlock();
1144 	return retval;
1145 }
1146 
1147 static void set_special_pids(struct pid *pid)
1148 {
1149 	struct task_struct *curr = current->group_leader;
1150 
1151 	if (task_session(curr) != pid)
1152 		change_pid(curr, PIDTYPE_SID, pid);
1153 
1154 	if (task_pgrp(curr) != pid)
1155 		change_pid(curr, PIDTYPE_PGID, pid);
1156 }
1157 
1158 int ksys_setsid(void)
1159 {
1160 	struct task_struct *group_leader = current->group_leader;
1161 	struct pid *sid = task_pid(group_leader);
1162 	pid_t session = pid_vnr(sid);
1163 	int err = -EPERM;
1164 
1165 	write_lock_irq(&tasklist_lock);
1166 	/* Fail if I am already a session leader */
1167 	if (group_leader->signal->leader)
1168 		goto out;
1169 
1170 	/* Fail if a process group id already exists that equals the
1171 	 * proposed session id.
1172 	 */
1173 	if (pid_task(sid, PIDTYPE_PGID))
1174 		goto out;
1175 
1176 	group_leader->signal->leader = 1;
1177 	set_special_pids(sid);
1178 
1179 	proc_clear_tty(group_leader);
1180 
1181 	err = session;
1182 out:
1183 	write_unlock_irq(&tasklist_lock);
1184 	if (err > 0) {
1185 		proc_sid_connector(group_leader);
1186 		sched_autogroup_create_attach(group_leader);
1187 	}
1188 	return err;
1189 }
1190 
1191 SYSCALL_DEFINE0(setsid)
1192 {
1193 	return ksys_setsid();
1194 }
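
/*
 * [Editor's note -- illustrative userspace sketch, not in the original
 * file]  ksys_setsid() fails with -EPERM for an existing process-group
 * leader, which is why the classic daemonization sequence forks first:
 *
 *	if (fork() > 0)
 *		_exit(0);	// parent leaves; child cannot be a leader
 *	if (setsid() < 0)	// child becomes session and group leader
 *		abort();
 */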
1195 
1196 DECLARE_RWSEM(uts_sem);
1197 
1198 #ifdef COMPAT_UTS_MACHINE
1199 #define override_architecture(name) \
1200 	(personality(current->personality) == PER_LINUX32 && \
1201 	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1202 		      sizeof(COMPAT_UTS_MACHINE)))
1203 #else
1204 #define override_architecture(name)	0
1205 #endif
1206 
1207 /*
1208  * Work around broken programs that cannot handle "Linux 3.0".
1209  * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40.
1210  * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be
1211  * 2.6.60.
1212  */
1213 static int override_release(char __user *release, size_t len)
1214 {
1215 	int ret = 0;
1216 
1217 	if (current->personality & UNAME26) {
1218 		const char *rest = UTS_RELEASE;
1219 		char buf[65] = { 0 };
1220 		int ndots = 0;
1221 		unsigned v;
1222 		size_t copy;
1223 
1224 		while (*rest) {
1225 			if (*rest == '.' && ++ndots >= 3)
1226 				break;
1227 			if (!isdigit(*rest) && *rest != '.')
1228 				break;
1229 			rest++;
1230 		}
1231 		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
1232 		copy = clamp_t(size_t, len, 1, sizeof(buf));
1233 		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
1234 		ret = copy_to_user(release, buf, copy + 1);
1235 	}
1236 	return ret;
1237 }
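
/*
 * [Editor's note -- illustrative note, not in the original file]  With
 * the UNAME26 personality a kernel whose UTS_RELEASE is, say,
 * "5.4.0-rc1" would report "2.6.64-rc1": minor version 4 plus the
 * offset of 60 computed above, with the non-numeric tail appended
 * unchanged.
 */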
1238 
1239 SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1240 {
1241 	struct new_utsname tmp;
1242 
1243 	down_read(&uts_sem);
1244 	memcpy(&tmp, utsname(), sizeof(tmp));
1245 	up_read(&uts_sem);
1246 	if (copy_to_user(name, &tmp, sizeof(tmp)))
1247 		return -EFAULT;
1248 
1249 	if (override_release(name->release, sizeof(name->release)))
1250 		return -EFAULT;
1251 	if (override_architecture(name))
1252 		return -EFAULT;
1253 	return 0;
1254 }
1255 
1256 #ifdef __ARCH_WANT_SYS_OLD_UNAME
1257 /*
1258  * Old cruft
1259  */
1260 SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1261 {
1262 	struct old_utsname tmp;
1263 
1264 	if (!name)
1265 		return -EFAULT;
1266 
1267 	down_read(&uts_sem);
1268 	memcpy(&tmp, utsname(), sizeof(tmp));
1269 	up_read(&uts_sem);
1270 	if (copy_to_user(name, &tmp, sizeof(tmp)))
1271 		return -EFAULT;
1272 
1273 	if (override_release(name->release, sizeof(name->release)))
1274 		return -EFAULT;
1275 	if (override_architecture(name))
1276 		return -EFAULT;
1277 	return 0;
1278 }
1279 
1280 SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1281 {
1282 	struct oldold_utsname tmp = {};
1283 
1284 	if (!name)
1285 		return -EFAULT;
1286 
1287 	down_read(&uts_sem);
1288 	memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
1289 	memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
1290 	memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
1291 	memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
1292 	memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
1293 	up_read(&uts_sem);
1294 	if (copy_to_user(name, &tmp, sizeof(tmp)))
1295 		return -EFAULT;
1296 
1297 	if (override_architecture(name))
1298 		return -EFAULT;
1299 	if (override_release(name->release, sizeof(name->release)))
1300 		return -EFAULT;
1301 	return 0;
1302 }
1303 #endif
1304 
1305 SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1306 {
1307 	int errno;
1308 	char tmp[__NEW_UTS_LEN];
1309 
1310 	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1311 		return -EPERM;
1312 
1313 	if (len < 0 || len > __NEW_UTS_LEN)
1314 		return -EINVAL;
1315 	errno = -EFAULT;
1316 	if (!copy_from_user(tmp, name, len)) {
1317 		struct new_utsname *u;
1318 
1319 		down_write(&uts_sem);
1320 		u = utsname();
1321 		memcpy(u->nodename, tmp, len);
1322 		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1323 		errno = 0;
1324 		uts_proc_notify(UTS_PROC_HOSTNAME);
1325 		up_write(&uts_sem);
1326 	}
1327 	return errno;
1328 }
1329 
1330 #ifdef __ARCH_WANT_SYS_GETHOSTNAME
1331 
1332 SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1333 {
1334 	int i;
1335 	struct new_utsname *u;
1336 	char tmp[__NEW_UTS_LEN + 1];
1337 
1338 	if (len < 0)
1339 		return -EINVAL;
1340 	down_read(&uts_sem);
1341 	u = utsname();
1342 	i = 1 + strlen(u->nodename);
1343 	if (i > len)
1344 		i = len;
1345 	memcpy(tmp, u->nodename, i);
1346 	up_read(&uts_sem);
1347 	if (copy_to_user(name, tmp, i))
1348 		return -EFAULT;
1349 	return 0;
1350 }
1351 
1352 #endif
1353 
1354 /*
1355  * Only setdomainname; getdomainname can be implemented by calling
1356  * uname()
1357  */
1358 SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1359 {
1360 	int errno;
1361 	char tmp[__NEW_UTS_LEN];
1362 
1363 	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1364 		return -EPERM;
1365 	if (len < 0 || len > __NEW_UTS_LEN)
1366 		return -EINVAL;
1367 
1368 	errno = -EFAULT;
1369 	if (!copy_from_user(tmp, name, len)) {
1370 		struct new_utsname *u;
1371 
1372 		down_write(&uts_sem);
1373 		u = utsname();
1374 		memcpy(u->domainname, tmp, len);
1375 		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1376 		errno = 0;
1377 		uts_proc_notify(UTS_PROC_DOMAINNAME);
1378 		up_write(&uts_sem);
1379 	}
1380 	return errno;
1381 }
1382 
1383 SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1384 {
1385 	struct rlimit value;
1386 	int ret;
1387 
1388 	ret = do_prlimit(current, resource, NULL, &value);
1389 	if (!ret)
1390 		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1391 
1392 	return ret;
1393 }
1394 
1395 #ifdef CONFIG_COMPAT
1396 
1397 COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
1398 		       struct compat_rlimit __user *, rlim)
1399 {
1400 	struct rlimit r;
1401 	struct compat_rlimit r32;
1402 
1403 	if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
1404 		return -EFAULT;
1405 
1406 	if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
1407 		r.rlim_cur = RLIM_INFINITY;
1408 	else
1409 		r.rlim_cur = r32.rlim_cur;
1410 	if (r32.rlim_max == COMPAT_RLIM_INFINITY)
1411 		r.rlim_max = RLIM_INFINITY;
1412 	else
1413 		r.rlim_max = r32.rlim_max;
1414 	return do_prlimit(current, resource, &r, NULL);
1415 }
1416 
1417 COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
1418 		       struct compat_rlimit __user *, rlim)
1419 {
1420 	struct rlimit r;
1421 	int ret;
1422 
1423 	ret = do_prlimit(current, resource, NULL, &r);
1424 	if (!ret) {
1425 		struct compat_rlimit r32;
1426 		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
1427 			r32.rlim_cur = COMPAT_RLIM_INFINITY;
1428 		else
1429 			r32.rlim_cur = r.rlim_cur;
1430 		if (r.rlim_max > COMPAT_RLIM_INFINITY)
1431 			r32.rlim_max = COMPAT_RLIM_INFINITY;
1432 		else
1433 			r32.rlim_max = r.rlim_max;
1434 
1435 		if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
1436 			return -EFAULT;
1437 	}
1438 	return ret;
1439 }
1440 
1441 #endif
1442 
1443 #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1444 
1445 /*
1446  *	Backward compatibility for getrlimit. Needed for some apps.
1447  */
1448 SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1449 		struct rlimit __user *, rlim)
1450 {
1451 	struct rlimit x;
1452 	if (resource >= RLIM_NLIMITS)
1453 		return -EINVAL;
1454 
1455 	resource = array_index_nospec(resource, RLIM_NLIMITS);
1456 	task_lock(current->group_leader);
1457 	x = current->signal->rlim[resource];
1458 	task_unlock(current->group_leader);
1459 	if (x.rlim_cur > 0x7FFFFFFF)
1460 		x.rlim_cur = 0x7FFFFFFF;
1461 	if (x.rlim_max > 0x7FFFFFFF)
1462 		x.rlim_max = 0x7FFFFFFF;
1463 	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
1464 }
1465 
1466 #ifdef CONFIG_COMPAT
1467 COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1468 		       struct compat_rlimit __user *, rlim)
1469 {
1470 	struct rlimit r;
1471 
1472 	if (resource >= RLIM_NLIMITS)
1473 		return -EINVAL;
1474 
1475 	resource = array_index_nospec(resource, RLIM_NLIMITS);
1476 	task_lock(current->group_leader);
1477 	r = current->signal->rlim[resource];
1478 	task_unlock(current->group_leader);
1479 	if (r.rlim_cur > 0x7FFFFFFF)
1480 		r.rlim_cur = 0x7FFFFFFF;
1481 	if (r.rlim_max > 0x7FFFFFFF)
1482 		r.rlim_max = 0x7FFFFFFF;
1483 
1484 	if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
1485 	    put_user(r.rlim_max, &rlim->rlim_max))
1486 		return -EFAULT;
1487 	return 0;
1488 }
1489 #endif
1490 
1491 #endif
1492 
1493 static inline bool rlim64_is_infinity(__u64 rlim64)
1494 {
1495 #if BITS_PER_LONG < 64
1496 	return rlim64 >= ULONG_MAX;
1497 #else
1498 	return rlim64 == RLIM64_INFINITY;
1499 #endif
1500 }
1501 
1502 static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
1503 {
1504 	if (rlim->rlim_cur == RLIM_INFINITY)
1505 		rlim64->rlim_cur = RLIM64_INFINITY;
1506 	else
1507 		rlim64->rlim_cur = rlim->rlim_cur;
1508 	if (rlim->rlim_max == RLIM_INFINITY)
1509 		rlim64->rlim_max = RLIM64_INFINITY;
1510 	else
1511 		rlim64->rlim_max = rlim->rlim_max;
1512 }
1513 
1514 static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
1515 {
1516 	if (rlim64_is_infinity(rlim64->rlim_cur))
1517 		rlim->rlim_cur = RLIM_INFINITY;
1518 	else
1519 		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
1520 	if (rlim64_is_infinity(rlim64->rlim_max))
1521 		rlim->rlim_max = RLIM_INFINITY;
1522 	else
1523 		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
1524 }
1525 
1526 /* make sure you are allowed to change @tsk limits before calling this */
1527 int do_prlimit(struct task_struct *tsk, unsigned int resource,
1528 		struct rlimit *new_rlim, struct rlimit *old_rlim)
1529 {
1530 	struct rlimit *rlim;
1531 	int retval = 0;
1532 
1533 	if (resource >= RLIM_NLIMITS)
1534 		return -EINVAL;
1535 	if (new_rlim) {
1536 		if (new_rlim->rlim_cur > new_rlim->rlim_max)
1537 			return -EINVAL;
1538 		if (resource == RLIMIT_NOFILE &&
1539 				new_rlim->rlim_max > sysctl_nr_open)
1540 			return -EPERM;
1541 	}
1542 
1543 	/* protect tsk->signal and tsk->sighand from disappearing */
1544 	read_lock(&tasklist_lock);
1545 	if (!tsk->sighand) {
1546 		retval = -ESRCH;
1547 		goto out;
1548 	}
1549 
1550 	rlim = tsk->signal->rlim + resource;
1551 	task_lock(tsk->group_leader);
1552 	if (new_rlim) {
1553 		/* Keep the capable check against init_user_ns until
1554 		   cgroups can contain all limits */
1555 		if (new_rlim->rlim_max > rlim->rlim_max &&
1556 				!capable(CAP_SYS_RESOURCE))
1557 			retval = -EPERM;
1558 		if (!retval)
1559 			retval = security_task_setrlimit(tsk, resource, new_rlim);
1560 	}
1561 	if (!retval) {
1562 		if (old_rlim)
1563 			*old_rlim = *rlim;
1564 		if (new_rlim)
1565 			*rlim = *new_rlim;
1566 	}
1567 	task_unlock(tsk->group_leader);
1568 
1569 	/*
1570 	 * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not
1571 	 * infinite. In case of RLIM_INFINITY the posix CPU timer code
1572 	 * ignores the rlimit.
1573 	 */
1574 	if (!retval && new_rlim && resource == RLIMIT_CPU &&
1575 	    new_rlim->rlim_cur != RLIM_INFINITY &&
1576 	    IS_ENABLED(CONFIG_POSIX_TIMERS))
1577 		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
1578 out:
1579 	read_unlock(&tasklist_lock);
1580 	return retval;
1581 }
1582 
1583 /* rcu lock must be held */
1584 static int check_prlimit_permission(struct task_struct *task,
1585 				    unsigned int flags)
1586 {
1587 	const struct cred *cred = current_cred(), *tcred;
1588 	bool id_match;
1589 
1590 	if (current == task)
1591 		return 0;
1592 
1593 	tcred = __task_cred(task);
1594 	id_match = (uid_eq(cred->uid, tcred->euid) &&
1595 		    uid_eq(cred->uid, tcred->suid) &&
1596 		    uid_eq(cred->uid, tcred->uid)  &&
1597 		    gid_eq(cred->gid, tcred->egid) &&
1598 		    gid_eq(cred->gid, tcred->sgid) &&
1599 		    gid_eq(cred->gid, tcred->gid));
1600 	if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
1601 		return -EPERM;
1602 
1603 	return security_task_prlimit(cred, tcred, flags);
1604 }
1605 
1606 SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
1607 		const struct rlimit64 __user *, new_rlim,
1608 		struct rlimit64 __user *, old_rlim)
1609 {
1610 	struct rlimit64 old64, new64;
1611 	struct rlimit old, new;
1612 	struct task_struct *tsk;
1613 	unsigned int checkflags = 0;
1614 	int ret;
1615 
1616 	if (old_rlim)
1617 		checkflags |= LSM_PRLIMIT_READ;
1618 
1619 	if (new_rlim) {
1620 		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
1621 			return -EFAULT;
1622 		rlim64_to_rlim(&new64, &new);
1623 		checkflags |= LSM_PRLIMIT_WRITE;
1624 	}
1625 
1626 	rcu_read_lock();
1627 	tsk = pid ? find_task_by_vpid(pid) : current;
1628 	if (!tsk) {
1629 		rcu_read_unlock();
1630 		return -ESRCH;
1631 	}
1632 	ret = check_prlimit_permission(tsk, checkflags);
1633 	if (ret) {
1634 		rcu_read_unlock();
1635 		return ret;
1636 	}
1637 	get_task_struct(tsk);
1638 	rcu_read_unlock();
1639 
1640 	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
1641 			old_rlim ? &old : NULL);
1642 
1643 	if (!ret && old_rlim) {
1644 		rlim_to_rlim64(&old, &old64);
1645 		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
1646 			ret = -EFAULT;
1647 	}
1648 
1649 	put_task_struct(tsk);
1650 	return ret;
1651 }
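
/*
 * [Editor's note -- illustrative userspace sketch, not in the original
 * file]  glibc's prlimit() maps onto this syscall; reading a target's
 * old limit while installing a new one is a single atomic operation:
 *
 *	struct rlimit newl = { .rlim_cur = 4096, .rlim_max = 4096 };
 *	struct rlimit oldl;
 *
 *	if (prlimit(pid, RLIMIT_NOFILE, &newl, &oldl) < 0)
 *		...needs matching uids/gids or CAP_SYS_RESOURCE...
 */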
1652 
1653 SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1654 {
1655 	struct rlimit new_rlim;
1656 
1657 	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1658 		return -EFAULT;
1659 	return do_prlimit(current, resource, &new_rlim, NULL);
1660 }
1661 
1662 /*
1663  * It would make sense to put struct rusage in the task_struct,
1664  * except that would make the task_struct be *really big*.  After
1665  * task_struct gets moved into malloc'ed memory, it would
1666  * make sense to do this.  It will make moving the rest of the information
1667  * a lot simpler!  (Which we're not doing right now because we're not
1668  * measuring them yet).
1669  *
1670  * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1671  * races with threads incrementing their own counters.  But since word
1672  * reads are atomic, we either get new values or old values and we don't
1673  * care which for the sums.  We always take the siglock to protect reading
1674  * the c* fields from p->signal from races with exit.c updating those
1675  * fields when reaping, so a sample either gets all the additions of a
1676  * given child after it's reaped, or none so this sample is before reaping.
1677  *
1678  * Locking:
1679  * We need to take the siglock for CHILDREN, SELF and BOTH, to cover the
1680  * cases of a multithreaded current, a non-current single-threaded task,
1681  * and a non-current multithreaded task.  Thread traversal is now safe
1682  * with the siglock held.
1683  * Strictly speaking, we do not need to take the siglock if we are current
1684  * and single threaded, as no one else can take our signal_struct away,
1685  * no one else can reap the children to update signal->c* counters, and
1686  * no one else can race with the signal-> fields. If we do not take any
1687  * lock, the signal-> fields could be read out of order while another
1688  * thread was just exiting. So we should place a read memory barrier
1689  * when we avoid the lock. On the writer side, a write memory barrier is
1690  * implied in __exit_signal as it releases the siglock after updating
1691  * the signal-> fields. But we don't do this yet, to keep things simple.
1692  *
1693  */
1694 
1695 static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1696 {
1697 	r->ru_nvcsw += t->nvcsw;
1698 	r->ru_nivcsw += t->nivcsw;
1699 	r->ru_minflt += t->min_flt;
1700 	r->ru_majflt += t->maj_flt;
1701 	r->ru_inblock += task_io_get_inblock(t);
1702 	r->ru_oublock += task_io_get_oublock(t);
1703 }
1704 
1705 void getrusage(struct task_struct *p, int who, struct rusage *r)
1706 {
1707 	struct task_struct *t;
1708 	unsigned long flags;
1709 	u64 tgutime, tgstime, utime, stime;
1710 	unsigned long maxrss = 0;
1711 
1712 	memset((char *)r, 0, sizeof (*r));
1713 	utime = stime = 0;
1714 
1715 	if (who == RUSAGE_THREAD) {
1716 		task_cputime_adjusted(current, &utime, &stime);
1717 		accumulate_thread_rusage(p, r);
1718 		maxrss = p->signal->maxrss;
1719 		goto out;
1720 	}
1721 
1722 	if (!lock_task_sighand(p, &flags))
1723 		return;
1724 
1725 	switch (who) {
1726 	case RUSAGE_BOTH:
1727 	case RUSAGE_CHILDREN:
1728 		utime = p->signal->cutime;
1729 		stime = p->signal->cstime;
1730 		r->ru_nvcsw = p->signal->cnvcsw;
1731 		r->ru_nivcsw = p->signal->cnivcsw;
1732 		r->ru_minflt = p->signal->cmin_flt;
1733 		r->ru_majflt = p->signal->cmaj_flt;
1734 		r->ru_inblock = p->signal->cinblock;
1735 		r->ru_oublock = p->signal->coublock;
1736 		maxrss = p->signal->cmaxrss;
1737 
1738 		if (who == RUSAGE_CHILDREN)
1739 			break;
1740 		/* fall through */
1741 
1742 	case RUSAGE_SELF:
1743 		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1744 		utime += tgutime;
1745 		stime += tgstime;
1746 		r->ru_nvcsw += p->signal->nvcsw;
1747 		r->ru_nivcsw += p->signal->nivcsw;
1748 		r->ru_minflt += p->signal->min_flt;
1749 		r->ru_majflt += p->signal->maj_flt;
1750 		r->ru_inblock += p->signal->inblock;
1751 		r->ru_oublock += p->signal->oublock;
1752 		if (maxrss < p->signal->maxrss)
1753 			maxrss = p->signal->maxrss;
1754 		t = p;
1755 		do {
1756 			accumulate_thread_rusage(t, r);
1757 		} while_each_thread(p, t);
1758 		break;
1759 
1760 	default:
1761 		BUG();
1762 	}
1763 	unlock_task_sighand(p, &flags);
1764 
1765 out:
1766 	r->ru_utime = ns_to_timeval(utime);
1767 	r->ru_stime = ns_to_timeval(stime);
1768 
1769 	if (who != RUSAGE_CHILDREN) {
1770 		struct mm_struct *mm = get_task_mm(p);
1771 
1772 		if (mm) {
1773 			setmax_mm_hiwater_rss(&maxrss, mm);
1774 			mmput(mm);
1775 		}
1776 	}
1777 	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
1778 }
1779 
1780 SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1781 {
1782 	struct rusage r;
1783 
1784 	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1785 	    who != RUSAGE_THREAD)
1786 		return -EINVAL;
1787 
1788 	getrusage(current, who, &r);
1789 	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1790 }
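
/*
 * [Editor's note -- illustrative userspace sketch, not in the original
 * file]  RUSAGE_THREAD takes the early lockless path in getrusage()
 * and reports only the calling thread, while RUSAGE_SELF sums the whole
 * thread group under the siglock:
 *
 *	struct rusage ru;	// needs _GNU_SOURCE for RUSAGE_THREAD
 *
 *	getrusage(RUSAGE_THREAD, &ru);
 *	printf("minor faults: %ld\n", ru.ru_minflt);
 */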
1791 
1792 #ifdef CONFIG_COMPAT
1793 COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
1794 {
1795 	struct rusage r;
1796 
1797 	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1798 	    who != RUSAGE_THREAD)
1799 		return -EINVAL;
1800 
1801 	getrusage(current, who, &r);
1802 	return put_compat_rusage(&r, ru);
1803 }
1804 #endif
1805 
1806 SYSCALL_DEFINE1(umask, int, mask)
1807 {
1808 	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1809 	return mask;
1810 }
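
/*
 * [Editor's note -- illustrative userspace sketch, not in the original
 * file]  The xchg() above means umask() cannot fail: the new mask is
 * truncated to its S_IRWXUGO bits and the previous mask is returned, so
 * a non-destructive query takes two calls:
 *
 *	mode_t old = umask(0);	// read the current mask...
 *	umask(old);		// ...and put it back
 */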
1811 
1812 static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1813 {
1814 	struct fd exe;
1815 	struct file *old_exe, *exe_file;
1816 	struct inode *inode;
1817 	int err;
1818 
1819 	exe = fdget(fd);
1820 	if (!exe.file)
1821 		return -EBADF;
1822 
1823 	inode = file_inode(exe.file);
1824 
1825 	/*
1826 	 * Because the original mm->exe_file points to an executable file, make
1827 	 * sure that this one is executable as well, to avoid breaking the
1828 	 * overall picture.
1829 	 */
1830 	err = -EACCES;
1831 	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
1832 		goto exit;
1833 
1834 	err = inode_permission(inode, MAY_EXEC);
1835 	if (err)
1836 		goto exit;
1837 
1838 	/*
1839 	 * Forbid mm->exe_file change if old file still mapped.
1840 	 */
1841 	exe_file = get_mm_exe_file(mm);
1842 	err = -EBUSY;
1843 	if (exe_file) {
1844 		struct vm_area_struct *vma;
1845 
1846 		down_read(&mm->mmap_sem);
1847 		for (vma = mm->mmap; vma; vma = vma->vm_next) {
1848 			if (!vma->vm_file)
1849 				continue;
1850 			if (path_equal(&vma->vm_file->f_path,
1851 				       &exe_file->f_path))
1852 				goto exit_err;
1853 		}
1854 
1855 		up_read(&mm->mmap_sem);
1856 		fput(exe_file);
1857 	}
1858 
1859 	err = 0;
1860 	/* set the new file, lockless */
1861 	get_file(exe.file);
1862 	old_exe = xchg(&mm->exe_file, exe.file);
1863 	if (old_exe)
1864 		fput(old_exe);
1865 exit:
1866 	fdput(exe);
1867 	return err;
1868 exit_err:
1869 	up_read(&mm->mmap_sem);
1870 	fput(exe_file);
1871 	goto exit;
1872 }
1873 
1874 /*
1875  * Check arithmetic relations of passed addresses.
1876  *
1877  * WARNING: we don't require any capability here so be very careful
1878  * in what is allowed for modification from userspace.
1879  */
1880 static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
1881 {
1882 	unsigned long mmap_max_addr = TASK_SIZE;
1883 	int error = -EINVAL, i;
1884 
1885 	static const unsigned char offsets[] = {
1886 		offsetof(struct prctl_mm_map, start_code),
1887 		offsetof(struct prctl_mm_map, end_code),
1888 		offsetof(struct prctl_mm_map, start_data),
1889 		offsetof(struct prctl_mm_map, end_data),
1890 		offsetof(struct prctl_mm_map, start_brk),
1891 		offsetof(struct prctl_mm_map, brk),
1892 		offsetof(struct prctl_mm_map, start_stack),
1893 		offsetof(struct prctl_mm_map, arg_start),
1894 		offsetof(struct prctl_mm_map, arg_end),
1895 		offsetof(struct prctl_mm_map, env_start),
1896 		offsetof(struct prctl_mm_map, env_end),
1897 	};
1898 
1899 	/*
1900 	 * Make sure the members are not somewhere outside
1901 	 * of allowed address space.
1902 	 */
1903 	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
1904 		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);
1905 
1906 		if ((unsigned long)val >= mmap_max_addr ||
1907 		    (unsigned long)val < mmap_min_addr)
1908 			goto out;
1909 	}
1910 
1911 	/*
1912 	 * Make sure the pairs are ordered.
1913 	 */
1914 #define __prctl_check_order(__m1, __op, __m2)				\
1915 	((unsigned long)prctl_map->__m1 __op				\
1916 	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
1917 	error  = __prctl_check_order(start_code, <, end_code);
1918 	error |= __prctl_check_order(start_data, <=, end_data);
1919 	error |= __prctl_check_order(start_brk, <=, brk);
1920 	error |= __prctl_check_order(arg_start, <=, arg_end);
1921 	error |= __prctl_check_order(env_start, <=, env_end);
1922 	if (error)
1923 		goto out;
1924 #undef __prctl_check_order
1925 
1926 	error = -EINVAL;
1927 
1928 	/*
1929 	 * @brk should be after @end_data in traditional maps.
1930 	 */
1931 	if (prctl_map->start_brk <= prctl_map->end_data ||
1932 	    prctl_map->brk <= prctl_map->end_data)
1933 		goto out;
1934 
1935 	/*
1936 	 * Nor should we allow overriding the limits if they are set.
1937 	 */
1938 	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
1939 			      prctl_map->start_brk, prctl_map->end_data,
1940 			      prctl_map->start_data))
1941 			goto out;
1942 
1943 	error = 0;
1944 out:
1945 	return error;
1946 }
1947 
1948 #ifdef CONFIG_CHECKPOINT_RESTORE
1949 static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
1950 {
1951 	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
1952 	unsigned long user_auxv[AT_VECTOR_SIZE];
1953 	struct mm_struct *mm = current->mm;
1954 	int error;
1955 
1956 	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1957 	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);
1958 
1959 	if (opt == PR_SET_MM_MAP_SIZE)
1960 		return put_user((unsigned int)sizeof(prctl_map),
1961 				(unsigned int __user *)addr);
1962 
1963 	if (data_size != sizeof(prctl_map))
1964 		return -EINVAL;
1965 
1966 	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
1967 		return -EFAULT;
1968 
1969 	error = validate_prctl_map_addr(&prctl_map);
1970 	if (error)
1971 		return error;
1972 
1973 	if (prctl_map.auxv_size) {
1974 		/*
1975 		 * Someone is trying to cheat the auxv vector.
1976 		 */
1977 		if (!prctl_map.auxv ||
1978 				prctl_map.auxv_size > sizeof(mm->saved_auxv))
1979 			return -EINVAL;
1980 
1981 		memset(user_auxv, 0, sizeof(user_auxv));
1982 		if (copy_from_user(user_auxv,
1983 				   (const void __user *)prctl_map.auxv,
1984 				   prctl_map.auxv_size))
1985 			return -EFAULT;
1986 
1987 		/* Last entry must be AT_NULL as specification requires */
1988 		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
1989 		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
1990 	}
1991 
1992 	if (prctl_map.exe_fd != (u32)-1) {
1993 		/*
1994 		 * Make sure the caller has the rights to
1995 		 * change /proc/pid/exe link: only local sys admin should
1996 		 * be allowed to.
1997 		 */
1998 		if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
1999 			return -EINVAL;
2000 
2001 		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
2002 		if (error)
2003 			return error;
2004 	}
2005 
2006 	/*
2007 	 * arg_lock protects concurrent updates, but we still need mmap_sem for
2008 	 * read to exclude races with sys_brk.
2009 	 */
2010 	down_read(&mm->mmap_sem);
2011 
2012 	/*
2013 	 * We don't validate that these members point to real, present VMAs,
2014 	 * because the application may already have unmapped the corresponding
2015 	 * VMAs and the kernel mostly uses these members for statistics output
2016 	 * in procfs, except for
2017 	 *
2018 	 *  - @start_brk/@brk, which are used in do_brk, but the kernel looks
2019 	 *    up the VMAs when updating these members, so anything wrong
2020 	 *    written here makes the kernel swear at the userspace program
2021 	 *    but won't lead to any problem in the kernel itself
2022 	 */
2023 
2024 	spin_lock(&mm->arg_lock);
2025 	mm->start_code	= prctl_map.start_code;
2026 	mm->end_code	= prctl_map.end_code;
2027 	mm->start_data	= prctl_map.start_data;
2028 	mm->end_data	= prctl_map.end_data;
2029 	mm->start_brk	= prctl_map.start_brk;
2030 	mm->brk		= prctl_map.brk;
2031 	mm->start_stack	= prctl_map.start_stack;
2032 	mm->arg_start	= prctl_map.arg_start;
2033 	mm->arg_end	= prctl_map.arg_end;
2034 	mm->env_start	= prctl_map.env_start;
2035 	mm->env_end	= prctl_map.env_end;
2036 	spin_unlock(&mm->arg_lock);
2037 
2038 	/*
2039 	 * Note this update of @saved_auxv is lockless, thus
2040 	 * if someone reads this member in procfs while we're
2041 	 * updating it, they may see partially updated results.
2042 	 * That is a known and acceptable trade-off: we leave it
2043 	 * as is rather than introduce additional locks here and
2044 	 * make the kernel more complex.
2045 	 */
2046 	if (prctl_map.auxv_size)
2047 		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
2048 
2049 	up_read(&mm->mmap_sem);
2050 	return 0;
2051 }
2052 #endif /* CONFIG_CHECKPOINT_RESTORE */
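/*
 * Illustrative userspace sequence for the interface above -- a sketch,
 * not an ABI statement.  A checkpoint/restore tool first queries the
 * expected structure size, then submits the whole map in one call
 * (CAP_SYS_ADMIN in the user namespace is needed when exe_fd is set):
 *
 *	unsigned int size;
 *	struct prctl_mm_map map;
 *
 *	prctl(PR_SET_MM, PR_SET_MM_MAP_SIZE, (unsigned long)&size, 0, 0);
 *	... fill @map, which must be exactly @size bytes ...
 *	prctl(PR_SET_MM, PR_SET_MM_MAP, (unsigned long)&map, size, 0);
 */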
2053 
2054 static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
2055 			  unsigned long len)
2056 {
2057 	/*
2058 	 * This doesn't move the auxiliary vector itself since it's pinned to
2059 	 * mm_struct, but it permits filling the vector with new values.  It's
2060 	 * up to the caller to provide sane values here, otherwise userspace
2061 	 * tools which use this vector might be unhappy.
2062 	 */
2063 	unsigned long user_auxv[AT_VECTOR_SIZE];
2064 
2065 	if (len > sizeof(user_auxv))
2066 		return -EINVAL;
2067 
2068 	if (copy_from_user(user_auxv, (const void __user *)addr, len))
2069 		return -EFAULT;
2070 
2071 	/* Make sure the final (type, value) pair is always AT_NULL */
2072 	user_auxv[AT_VECTOR_SIZE - 2] = 0;
2073 	user_auxv[AT_VECTOR_SIZE - 1] = 0;
2074 
2075 	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
2076 
2077 	task_lock(current);
2078 	memcpy(mm->saved_auxv, user_auxv, len);
2079 	task_unlock(current);
2080 
2081 	return 0;
2082 }
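/*
 * Hypothetical caller of the helper above, dispatched via prctl()
 * (see prctl_set_mm() below; CAP_SYS_RESOURCE is required).  E.g.
 * writing just an AT_NULL terminator to clear the saved vector:
 *
 *	unsigned long auxv[2] = { AT_NULL, 0 };
 *
 *	prctl(PR_SET_MM, PR_SET_MM_AUXV, (unsigned long)auxv, sizeof(auxv), 0);
 */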
2083 
2084 static int prctl_set_mm(int opt, unsigned long addr,
2085 			unsigned long arg4, unsigned long arg5)
2086 {
2087 	struct mm_struct *mm = current->mm;
2088 	struct prctl_mm_map prctl_map = {
2089 		.auxv = NULL,
2090 		.auxv_size = 0,
2091 		.exe_fd = -1,
2092 	};
2093 	struct vm_area_struct *vma;
2094 	int error;
2095 
2096 	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
2097 			      opt != PR_SET_MM_MAP &&
2098 			      opt != PR_SET_MM_MAP_SIZE)))
2099 		return -EINVAL;
2100 
2101 #ifdef CONFIG_CHECKPOINT_RESTORE
2102 	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
2103 		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
2104 #endif
2105 
2106 	if (!capable(CAP_SYS_RESOURCE))
2107 		return -EPERM;
2108 
2109 	if (opt == PR_SET_MM_EXE_FILE)
2110 		return prctl_set_mm_exe_file(mm, (unsigned int)addr);
2111 
2112 	if (opt == PR_SET_MM_AUXV)
2113 		return prctl_set_auxv(mm, addr, arg4);
2114 
2115 	if (addr >= TASK_SIZE || addr < mmap_min_addr)
2116 		return -EINVAL;
2117 
2118 	error = -EINVAL;
2119 
2120 	/*
2121 	 * arg_lock protects concurrent updates of arg boundaries; we need
2122 	 * mmap_sem for a) concurrent sys_brk, b) finding the VMA for addr
2123 	 * validation.
2124 	 */
2125 	down_read(&mm->mmap_sem);
2126 	vma = find_vma(mm, addr);
2127 
2128 	spin_lock(&mm->arg_lock);
2129 	prctl_map.start_code	= mm->start_code;
2130 	prctl_map.end_code	= mm->end_code;
2131 	prctl_map.start_data	= mm->start_data;
2132 	prctl_map.end_data	= mm->end_data;
2133 	prctl_map.start_brk	= mm->start_brk;
2134 	prctl_map.brk		= mm->brk;
2135 	prctl_map.start_stack	= mm->start_stack;
2136 	prctl_map.arg_start	= mm->arg_start;
2137 	prctl_map.arg_end	= mm->arg_end;
2138 	prctl_map.env_start	= mm->env_start;
2139 	prctl_map.env_end	= mm->env_end;
2140 
2141 	switch (opt) {
2142 	case PR_SET_MM_START_CODE:
2143 		prctl_map.start_code = addr;
2144 		break;
2145 	case PR_SET_MM_END_CODE:
2146 		prctl_map.end_code = addr;
2147 		break;
2148 	case PR_SET_MM_START_DATA:
2149 		prctl_map.start_data = addr;
2150 		break;
2151 	case PR_SET_MM_END_DATA:
2152 		prctl_map.end_data = addr;
2153 		break;
2154 	case PR_SET_MM_START_STACK:
2155 		prctl_map.start_stack = addr;
2156 		break;
2157 	case PR_SET_MM_START_BRK:
2158 		prctl_map.start_brk = addr;
2159 		break;
2160 	case PR_SET_MM_BRK:
2161 		prctl_map.brk = addr;
2162 		break;
2163 	case PR_SET_MM_ARG_START:
2164 		prctl_map.arg_start = addr;
2165 		break;
2166 	case PR_SET_MM_ARG_END:
2167 		prctl_map.arg_end = addr;
2168 		break;
2169 	case PR_SET_MM_ENV_START:
2170 		prctl_map.env_start = addr;
2171 		break;
2172 	case PR_SET_MM_ENV_END:
2173 		prctl_map.env_end = addr;
2174 		break;
2175 	default:
2176 		goto out;
2177 	}
2178 
2179 	error = validate_prctl_map_addr(&prctl_map);
2180 	if (error)
2181 		goto out;
2182 
2183 	switch (opt) {
2184 	/*
2185 	 * If the command line arguments and environment
2186 	 * are placed somewhere else on the stack, we can
2187 	 * set them up here: ARG_START/END set up the
2188 	 * command line arguments and ENV_START/END the
2189 	 * environment.
2190 	 */
2191 	case PR_SET_MM_START_STACK:
2192 	case PR_SET_MM_ARG_START:
2193 	case PR_SET_MM_ARG_END:
2194 	case PR_SET_MM_ENV_START:
2195 	case PR_SET_MM_ENV_END:
2196 		if (!vma) {
2197 			error = -EFAULT;
2198 			goto out;
2199 		}
2200 	}
2201 
2202 	mm->start_code	= prctl_map.start_code;
2203 	mm->end_code	= prctl_map.end_code;
2204 	mm->start_data	= prctl_map.start_data;
2205 	mm->end_data	= prctl_map.end_data;
2206 	mm->start_brk	= prctl_map.start_brk;
2207 	mm->brk		= prctl_map.brk;
2208 	mm->start_stack	= prctl_map.start_stack;
2209 	mm->arg_start	= prctl_map.arg_start;
2210 	mm->arg_end	= prctl_map.arg_end;
2211 	mm->env_start	= prctl_map.env_start;
2212 	mm->env_end	= prctl_map.env_end;
2213 
2214 	error = 0;
2215 out:
2216 	spin_unlock(&mm->arg_lock);
2217 	up_read(&mm->mmap_sem);
2218 	return error;
2219 }
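/*
 * Sketch of the legacy one-field-at-a-time interface handled above;
 * the caller needs CAP_SYS_RESOURCE, and for the stack/arg/env options
 * @addr must resolve to an existing VMA, e.g. (hypothetical address):
 *
 *	prctl(PR_SET_MM, PR_SET_MM_START_STACK, new_stack_addr, 0, 0);
 */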
2220 
2221 #ifdef CONFIG_CHECKPOINT_RESTORE
2222 static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2223 {
2224 	return put_user(me->clear_child_tid, tid_addr);
2225 }
2226 #else
2227 static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2228 {
2229 	return -EINVAL;
2230 }
2231 #endif
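/*
 * Example read-back of the address registered with set_tid_address()
 * or CLONE_CHILD_CLEARTID (only useful on CONFIG_CHECKPOINT_RESTORE
 * kernels; -EINVAL otherwise):
 *
 *	int *tid_addr;
 *
 *	prctl(PR_GET_TID_ADDRESS, (unsigned long)&tid_addr, 0, 0, 0);
 */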
2232 
2233 static int propagate_has_child_subreaper(struct task_struct *p, void *data)
2234 {
2235 	/*
2236 	 * If the task has has_child_subreaper set, all its descendants
2237 	 * already have the flag too and new descendants will
2238 	 * inherit it on fork, so skip them.
2239 	 *
2240 	 * If we've found the child_reaper, skip descendants in
2241 	 * its subtree as they will never get out of the pidns.
2242 	 */
2243 	if (p->signal->has_child_subreaper ||
2244 	    is_child_reaper(task_pid(p)))
2245 		return 0;
2246 
2247 	p->signal->has_child_subreaper = 1;
2248 	return 1;
2249 }
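/*
 * Typical consumer of the subreaper machinery (a sketch): a service
 * manager marks itself so that orphaned descendants are reparented to
 * it instead of to init:
 *
 *	prctl(PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0);
 */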
2250 
2251 int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
2252 {
2253 	return -EINVAL;
2254 }
2255 
2256 int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
2257 				    unsigned long ctrl)
2258 {
2259 	return -EINVAL;
2260 }
2261 
2262 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2263 		unsigned long, arg4, unsigned long, arg5)
2264 {
2265 	struct task_struct *me = current;
2266 	unsigned char comm[sizeof(me->comm)];
2267 	long error;
2268 
2269 	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2270 	if (error != -ENOSYS)
2271 		return error;
2272 
2273 	error = 0;
2274 	switch (option) {
2275 	case PR_SET_PDEATHSIG:
2276 		if (!valid_signal(arg2)) {
2277 			error = -EINVAL;
2278 			break;
2279 		}
2280 		me->pdeath_signal = arg2;
2281 		break;
2282 	case PR_GET_PDEATHSIG:
2283 		error = put_user(me->pdeath_signal, (int __user *)arg2);
2284 		break;
2285 	case PR_GET_DUMPABLE:
2286 		error = get_dumpable(me->mm);
2287 		break;
2288 	case PR_SET_DUMPABLE:
2289 		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
2290 			error = -EINVAL;
2291 			break;
2292 		}
2293 		set_dumpable(me->mm, arg2);
2294 		break;
2295 
2296 	case PR_SET_UNALIGN:
2297 		error = SET_UNALIGN_CTL(me, arg2);
2298 		break;
2299 	case PR_GET_UNALIGN:
2300 		error = GET_UNALIGN_CTL(me, arg2);
2301 		break;
2302 	case PR_SET_FPEMU:
2303 		error = SET_FPEMU_CTL(me, arg2);
2304 		break;
2305 	case PR_GET_FPEMU:
2306 		error = GET_FPEMU_CTL(me, arg2);
2307 		break;
2308 	case PR_SET_FPEXC:
2309 		error = SET_FPEXC_CTL(me, arg2);
2310 		break;
2311 	case PR_GET_FPEXC:
2312 		error = GET_FPEXC_CTL(me, arg2);
2313 		break;
2314 	case PR_GET_TIMING:
2315 		error = PR_TIMING_STATISTICAL;
2316 		break;
2317 	case PR_SET_TIMING:
2318 		if (arg2 != PR_TIMING_STATISTICAL)
2319 			error = -EINVAL;
2320 		break;
2321 	case PR_SET_NAME:
2322 		comm[sizeof(me->comm) - 1] = 0;
2323 		if (strncpy_from_user(comm, (char __user *)arg2,
2324 				      sizeof(me->comm) - 1) < 0)
2325 			return -EFAULT;
2326 		set_task_comm(me, comm);
2327 		proc_comm_connector(me);
2328 		break;
2329 	case PR_GET_NAME:
2330 		get_task_comm(comm, me);
2331 		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
2332 			return -EFAULT;
2333 		break;
2334 	case PR_GET_ENDIAN:
2335 		error = GET_ENDIAN(me, arg2);
2336 		break;
2337 	case PR_SET_ENDIAN:
2338 		error = SET_ENDIAN(me, arg2);
2339 		break;
2340 	case PR_GET_SECCOMP:
2341 		error = prctl_get_seccomp();
2342 		break;
2343 	case PR_SET_SECCOMP:
2344 		error = prctl_set_seccomp(arg2, (char __user *)arg3);
2345 		break;
2346 	case PR_GET_TSC:
2347 		error = GET_TSC_CTL(arg2);
2348 		break;
2349 	case PR_SET_TSC:
2350 		error = SET_TSC_CTL(arg2);
2351 		break;
2352 	case PR_TASK_PERF_EVENTS_DISABLE:
2353 		error = perf_event_task_disable();
2354 		break;
2355 	case PR_TASK_PERF_EVENTS_ENABLE:
2356 		error = perf_event_task_enable();
2357 		break;
2358 	case PR_GET_TIMERSLACK:
2359 		if (current->timer_slack_ns > ULONG_MAX)
2360 			error = ULONG_MAX;
2361 		else
2362 			error = current->timer_slack_ns;
2363 		break;
2364 	case PR_SET_TIMERSLACK:
2365 		if (arg2 <= 0)
2366 			current->timer_slack_ns =
2367 					current->default_timer_slack_ns;
2368 		else
2369 			current->timer_slack_ns = arg2;
2370 		break;
2371 	case PR_MCE_KILL:
2372 		if (arg4 | arg5)
2373 			return -EINVAL;
2374 		switch (arg2) {
2375 		case PR_MCE_KILL_CLEAR:
2376 			if (arg3 != 0)
2377 				return -EINVAL;
2378 			current->flags &= ~PF_MCE_PROCESS;
2379 			break;
2380 		case PR_MCE_KILL_SET:
2381 			current->flags |= PF_MCE_PROCESS;
2382 			if (arg3 == PR_MCE_KILL_EARLY)
2383 				current->flags |= PF_MCE_EARLY;
2384 			else if (arg3 == PR_MCE_KILL_LATE)
2385 				current->flags &= ~PF_MCE_EARLY;
2386 			else if (arg3 == PR_MCE_KILL_DEFAULT)
2387 				current->flags &=
2388 						~(PF_MCE_EARLY|PF_MCE_PROCESS);
2389 			else
2390 				return -EINVAL;
2391 			break;
2392 		default:
2393 			return -EINVAL;
2394 		}
2395 		break;
2396 	case PR_MCE_KILL_GET:
2397 		if (arg2 | arg3 | arg4 | arg5)
2398 			return -EINVAL;
2399 		if (current->flags & PF_MCE_PROCESS)
2400 			error = (current->flags & PF_MCE_EARLY) ?
2401 				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
2402 		else
2403 			error = PR_MCE_KILL_DEFAULT;
2404 		break;
2405 	case PR_SET_MM:
2406 		error = prctl_set_mm(arg2, arg3, arg4, arg5);
2407 		break;
2408 	case PR_GET_TID_ADDRESS:
2409 		error = prctl_get_tid_address(me, (int __user **)arg2);
2410 		break;
2411 	case PR_SET_CHILD_SUBREAPER:
2412 		me->signal->is_child_subreaper = !!arg2;
2413 		if (!arg2)
2414 			break;
2415 
2416 		walk_process_tree(me, propagate_has_child_subreaper, NULL);
2417 		break;
2418 	case PR_GET_CHILD_SUBREAPER:
2419 		error = put_user(me->signal->is_child_subreaper,
2420 				 (int __user *)arg2);
2421 		break;
2422 	case PR_SET_NO_NEW_PRIVS:
2423 		if (arg2 != 1 || arg3 || arg4 || arg5)
2424 			return -EINVAL;
2425 
2426 		task_set_no_new_privs(current);
2427 		break;
2428 	case PR_GET_NO_NEW_PRIVS:
2429 		if (arg2 || arg3 || arg4 || arg5)
2430 			return -EINVAL;
2431 		return task_no_new_privs(current) ? 1 : 0;
2432 	case PR_GET_THP_DISABLE:
2433 		if (arg2 || arg3 || arg4 || arg5)
2434 			return -EINVAL;
2435 		error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
2436 		break;
2437 	case PR_SET_THP_DISABLE:
2438 		if (arg3 || arg4 || arg5)
2439 			return -EINVAL;
2440 		if (down_write_killable(&me->mm->mmap_sem))
2441 			return -EINTR;
2442 		if (arg2)
2443 			set_bit(MMF_DISABLE_THP, &me->mm->flags);
2444 		else
2445 			clear_bit(MMF_DISABLE_THP, &me->mm->flags);
2446 		up_write(&me->mm->mmap_sem);
2447 		break;
2448 	case PR_MPX_ENABLE_MANAGEMENT:
2449 	case PR_MPX_DISABLE_MANAGEMENT:
2450 		/* No longer implemented: */
2451 		return -EINVAL;
2452 	case PR_SET_FP_MODE:
2453 		error = SET_FP_MODE(me, arg2);
2454 		break;
2455 	case PR_GET_FP_MODE:
2456 		error = GET_FP_MODE(me);
2457 		break;
2458 	case PR_SVE_SET_VL:
2459 		error = SVE_SET_VL(arg2);
2460 		break;
2461 	case PR_SVE_GET_VL:
2462 		error = SVE_GET_VL();
2463 		break;
2464 	case PR_GET_SPECULATION_CTRL:
2465 		if (arg3 || arg4 || arg5)
2466 			return -EINVAL;
2467 		error = arch_prctl_spec_ctrl_get(me, arg2);
2468 		break;
2469 	case PR_SET_SPECULATION_CTRL:
2470 		if (arg4 || arg5)
2471 			return -EINVAL;
2472 		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
2473 		break;
2474 	case PR_PAC_RESET_KEYS:
2475 		if (arg3 || arg4 || arg5)
2476 			return -EINVAL;
2477 		error = PAC_RESET_KEYS(me, arg2);
2478 		break;
2479 	case PR_SET_TAGGED_ADDR_CTRL:
2480 		if (arg3 || arg4 || arg5)
2481 			return -EINVAL;
2482 		error = SET_TAGGED_ADDR_CTRL(arg2);
2483 		break;
2484 	case PR_GET_TAGGED_ADDR_CTRL:
2485 		if (arg2 || arg3 || arg4 || arg5)
2486 			return -EINVAL;
2487 		error = GET_TAGGED_ADDR_CTRL();
2488 		break;
2489 	default:
2490 		error = -EINVAL;
2491 		break;
2492 	}
2493 	return error;
2494 }
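/*
 * Minimal userspace sketch of the comm handling above; the buffer is
 * sizeof(task->comm) == TASK_COMM_LEN (16) bytes, NUL included:
 *
 *	char name[16] = "worker";
 *
 *	prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0);
 *	prctl(PR_GET_NAME, (unsigned long)name, 0, 0, 0);
 */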
2495 
2496 SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2497 		struct getcpu_cache __user *, unused)
2498 {
2499 	int err = 0;
2500 	int cpu = raw_smp_processor_id();
2501 
2502 	if (cpup)
2503 		err |= put_user(cpu, cpup);
2504 	if (nodep)
2505 		err |= put_user(cpu_to_node(cpu), nodep);
2506 	return err ? -EFAULT : 0;
2507 }
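/*
 * Example call; the third argument is the historical getcpu_cache
 * pointer and is ignored, as the parameter name says:
 *
 *	unsigned int cpu, node;
 *
 *	syscall(SYS_getcpu, &cpu, &node, NULL);
 */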
2508 
2509 /**
2510  * do_sysinfo - fill in sysinfo struct
2511  * @info: pointer to buffer to fill
2512  */
2513 static int do_sysinfo(struct sysinfo *info)
2514 {
2515 	unsigned long mem_total, sav_total;
2516 	unsigned int mem_unit, bitcount;
2517 	struct timespec64 tp;
2518 
2519 	memset(info, 0, sizeof(struct sysinfo));
2520 
2521 	ktime_get_boottime_ts64(&tp);
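	/* round a partial second of uptime up to a full second */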
2522 	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
2523 
2524 	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
2525 
2526 	info->procs = nr_threads;
2527 
2528 	si_meminfo(info);
2529 	si_swapinfo(info);
2530 
2531 	/*
2532 	 * If the sum of all the available memory (i.e. ram + swap)
2533 	 * is less than what can be stored in a 32 bit unsigned long, then
2534 	 * we can be binary compatible with 2.2.x kernels.  If not,
2535 	 * well, in that case 2.2.x was broken anyway...
2536 	 *
2537 	 *  -Erik Andersen <andersee@debian.org>
2538 	 */
2539 
2540 	mem_total = info->totalram + info->totalswap;
2541 	if (mem_total < info->totalram || mem_total < info->totalswap)
2542 		goto out;
2543 	bitcount = 0;
2544 	mem_unit = info->mem_unit;
2545 	while (mem_unit > 1) {
2546 		bitcount++;
2547 		mem_unit >>= 1;
2548 		sav_total = mem_total;
2549 		mem_total <<= 1;
2550 		if (mem_total < sav_total)
2551 			goto out;
2552 	}
2553 
2554 	/*
2555 	 * If mem_total did not overflow, multiply all memory values by
2556 	 * info->mem_unit and set it to 1.  This leaves things compatible
2557 	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
2558 	 * kernels...
2559 	 */
2560 
2561 	info->mem_unit = 1;
2562 	info->totalram <<= bitcount;
2563 	info->freeram <<= bitcount;
2564 	info->sharedram <<= bitcount;
2565 	info->bufferram <<= bitcount;
2566 	info->totalswap <<= bitcount;
2567 	info->freeswap <<= bitcount;
2568 	info->totalhigh <<= bitcount;
2569 	info->freehigh <<= bitcount;
2570 
2571 out:
2572 	return 0;
2573 }
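/*
 * Worked example of the scaling above, with hypothetical numbers:
 * mem_unit == 4096 gives bitcount == 12; if shifting mem_total left
 * twelve times never overflows, every field is multiplied by 2^12 and
 * mem_unit becomes 1, i.e. all sizes are reported directly in bytes.
 */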
2574 
2575 SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
2576 {
2577 	struct sysinfo val;
2578 
2579 	do_sysinfo(&val);
2580 
2581 	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
2582 		return -EFAULT;
2583 
2584 	return 0;
2585 }
2586 
2587 #ifdef CONFIG_COMPAT
2588 struct compat_sysinfo {
2589 	s32 uptime;
2590 	u32 loads[3];
2591 	u32 totalram;
2592 	u32 freeram;
2593 	u32 sharedram;
2594 	u32 bufferram;
2595 	u32 totalswap;
2596 	u32 freeswap;
2597 	u16 procs;
2598 	u16 pad;
2599 	u32 totalhigh;
2600 	u32 freehigh;
2601 	u32 mem_unit;
2602 	char _f[20-2*sizeof(u32)-sizeof(int)];
2603 };
2604 
2605 COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
2606 {
2607 	struct sysinfo s;
2608 
2609 	do_sysinfo(&s);
2610 
2611 	/* Check to see if any memory value is too large for 32-bit
2612 	 * and scale down if needed.
2613 	 */
2614 	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
2615 		int bitcount = 0;
2616 
2617 		while (s.mem_unit < PAGE_SIZE) {
2618 			s.mem_unit <<= 1;
2619 			bitcount++;
2620 		}
2621 
2622 		s.totalram >>= bitcount;
2623 		s.freeram >>= bitcount;
2624 		s.sharedram >>= bitcount;
2625 		s.bufferram >>= bitcount;
2626 		s.totalswap >>= bitcount;
2627 		s.freeswap >>= bitcount;
2628 		s.totalhigh >>= bitcount;
2629 		s.freehigh >>= bitcount;
2630 	}
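	/*
	 * e.g. (hypothetical): 16 GiB of RAM arrives here as
	 * totalram == 1UL << 34 with mem_unit == 1; the loop above raises
	 * mem_unit to PAGE_SIZE (bitcount == 12 for 4 KiB pages) and
	 * shifts each value down so it fits in 32 bits again.
	 */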
2631 
2632 	if (!access_ok(info, sizeof(struct compat_sysinfo)) ||
2633 	    __put_user(s.uptime, &info->uptime) ||
2634 	    __put_user(s.loads[0], &info->loads[0]) ||
2635 	    __put_user(s.loads[1], &info->loads[1]) ||
2636 	    __put_user(s.loads[2], &info->loads[2]) ||
2637 	    __put_user(s.totalram, &info->totalram) ||
2638 	    __put_user(s.freeram, &info->freeram) ||
2639 	    __put_user(s.sharedram, &info->sharedram) ||
2640 	    __put_user(s.bufferram, &info->bufferram) ||
2641 	    __put_user(s.totalswap, &info->totalswap) ||
2642 	    __put_user(s.freeswap, &info->freeswap) ||
2643 	    __put_user(s.procs, &info->procs) ||
2644 	    __put_user(s.totalhigh, &info->totalhigh) ||
2645 	    __put_user(s.freehigh, &info->freehigh) ||
2646 	    __put_user(s.mem_unit, &info->mem_unit))
2647 		return -EFAULT;
2648 
2649 	return 0;
2650 }
2651 #endif /* CONFIG_COMPAT */
2652