// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kmod.h>
#include <linux/ksm.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/random.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>
#include <linux/syscall_user_dispatch.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>
#include <linux/time_namespace.h>
#include <linux/binfmts.h>

#include <linux/sched.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/cputime.h>
#include <linux/rcupdate.h>
#include <linux/uidgid.h>
#include <linux/cred.h>

#include <linux/nospec.h>

#include <linux/kmsg_dump.h>
/* Move somewhere else to avoid recompiling? */
#include <generated/utsrelease.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include <trace/events/task.h>

#include "uid16.h"

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef GET_FP_MODE
# define GET_FP_MODE(a)		(-EINVAL)
#endif
#ifndef SET_FP_MODE
# define SET_FP_MODE(a,b)	(-EINVAL)
#endif
#ifndef SVE_SET_VL
# define SVE_SET_VL(a)		(-EINVAL)
#endif
#ifndef SVE_GET_VL
# define SVE_GET_VL()		(-EINVAL)
#endif
#ifndef SME_SET_VL
# define SME_SET_VL(a)		(-EINVAL)
#endif
#ifndef SME_GET_VL
# define SME_GET_VL()		(-EINVAL)
#endif
#ifndef PAC_RESET_KEYS
# define PAC_RESET_KEYS(a, b)	(-EINVAL)
#endif
#ifndef PAC_SET_ENABLED_KEYS
# define PAC_SET_ENABLED_KEYS(a, b, c)	(-EINVAL)
#endif
#ifndef PAC_GET_ENABLED_KEYS
# define PAC_GET_ENABLED_KEYS(a)	(-EINVAL)
#endif
#ifndef SET_TAGGED_ADDR_CTRL
# define SET_TAGGED_ADDR_CTRL(a)	(-EINVAL)
#endif
#ifndef GET_TAGGED_ADDR_CTRL
# define GET_TAGGED_ADDR_CTRL()		(-EINVAL)
#endif
#ifndef RISCV_V_SET_CONTROL
# define RISCV_V_SET_CONTROL(a)		(-EINVAL)
#endif
#ifndef RISCV_V_GET_CONTROL
# define RISCV_V_GET_CONTROL()		(-EINVAL)
#endif
#ifndef RISCV_SET_ICACHE_FLUSH_CTX
# define RISCV_SET_ICACHE_FLUSH_CTX(a, b)	(-EINVAL)
#endif
#ifndef PPC_GET_DEXCR_ASPECT
# define PPC_GET_DEXCR_ASPECT(a, b)	(-EINVAL)
#endif
#ifndef PPC_SET_DEXCR_ASPECT
# define PPC_SET_DEXCR_ASPECT(a, b, c)	(-EINVAL)
#endif

/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
/*
 * Returns true if current's euid is the same as p's uid or euid,
 * or if current has CAP_SYS_NICE in p's user_ns.
 *
 * Called with rcu_read_lock, creds are safe
 */
static bool set_one_prio_perm(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);

	if (uid_eq(pcred->uid,  cred->euid) ||
	    uid_eq(pcred->euid, cred->euid))
		return true;
	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
		return true;
	return false;
}

/*
 * set the priority of a task
 * - the caller must hold the RCU read lock
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (!set_one_prio_perm(p)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < MIN_NICE)
		niceval = MIN_NICE;
	if (niceval > MAX_NICE)
		niceval = MAX_NICE;

	rcu_read_lock();
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		read_lock(&tasklist_lock);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		read_unlock(&tasklist_lock);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		for_each_process_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
				error = set_one_prio(p, niceval, error);
		}
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	rcu_read_unlock();
out:
	return error;
}
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	rcu_read_lock();
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		read_lock(&tasklist_lock);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		read_unlock(&tasklist_lock);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		for_each_process_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
				niceval = nice_to_rlimit(task_nice(p));
				if (niceval > retval)
					retval = niceval;
			}
		}
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	rcu_read_unlock();

	return retval;
}
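/*
 * Example (userspace sketch, not part of this file): the 40..1 offset
 * described above is only visible at the raw syscall level; the glibc
 * wrapper converts the result back to the usual -20..19 nice range and
 * reports failures through errno, which is why errno must be cleared
 * before the call.
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		errno = 0;
 *		int nice_now = getpriority(PRIO_PROCESS, 0);
 *		if (nice_now == -1 && errno != 0)
 *			return 1;
 *		// Raising the nice value (lowering priority) needs no privilege.
 *		if (setpriority(PRIO_PROCESS, 0, nice_now + 1) != 0)
 *			perror("setpriority");
 *		printf("nice: %d -> %d\n", nice_now, getpriority(PRIO_PROCESS, 0));
 *		return 0;
 *	}
 */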
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa. (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD. A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
#ifdef CONFIG_MULTIUSER
long __sys_setregid(gid_t rgid, gid_t egid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		if (gid_eq(old->gid, krgid) ||
		    gid_eq(old->egid, krgid) ||
		    ns_capable_setid(old->user_ns, CAP_SETGID))
			new->gid = krgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		if (gid_eq(old->gid, kegid) ||
		    gid_eq(old->egid, kegid) ||
		    gid_eq(old->sgid, kegid) ||
		    ns_capable_setid(old->user_ns, CAP_SETGID))
			new->egid = kegid;
		else
			goto error;
	}

	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	retval = security_task_fix_setgid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	return __sys_setregid(rgid, egid);
}

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
long __sys_setgid(gid_t gid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t kgid;

	kgid = make_kgid(ns, gid);
	if (!gid_valid(kgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable_setid(old->user_ns, CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = kgid;
	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
		new->egid = new->fsgid = kgid;
	else
		goto error;

	retval = security_task_fix_setgid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	return __sys_setgid(gid);
}
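/*
 * Example (userspace sketch, illustrative only): a setgid helper that
 * permanently drops its elevated group by writing the real gid into both
 * the real and effective slots; per the comment above, setting the real
 * gid at all also resets the saved gid to the new effective gid.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static int drop_group(void)
 *	{
 *		gid_t rgid = getgid();
 *
 *		if (setregid(rgid, rgid) != 0) {
 *			perror("setregid");
 *			return -1;
 *		}
 *		return getegid() == rgid ? 0 : -1;
 *	}
 */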
/*
 * change the user struct in a credentials set to match the new UID
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

	free_uid(new->user);
	new->user = new_user;
	return 0;
}

static void flag_nproc_exceeded(struct cred *new)
{
	if (new->ucounts == current_ucounts())
		return;

	/*
	 * We don't fail in case of NPROC limit excess here because too many
	 * poorly written programs don't check set*uid() return code, assuming
	 * it never fails if called by root. We may still enforce NPROC limit
	 * for programs doing set*uid()+execve() by harmlessly deferring the
	 * failure to the execve() stage.
	 */
	if (is_rlimit_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) &&
			new->user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;
}

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa. (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD. A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
long __sys_setreuid(uid_t ruid, uid_t euid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;
	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(old->uid, kruid) &&
		    !uid_eq(old->euid, kruid) &&
		    !ns_capable_setid(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = keuid;
		if (!uid_eq(old->uid, keuid) &&
		    !uid_eq(old->euid, keuid) &&
		    !uid_eq(old->suid, keuid) &&
		    !ns_capable_setid(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (!uid_eq(new->uid, old->uid)) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	retval = set_cred_ucounts(new);
	if (retval < 0)
		goto error;

	flag_nproc_exceeded(new);
	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	return __sys_setreuid(ruid, euid);
}
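/*
 * Example (userspace sketch, illustrative only): a setuid program can drop
 * privileges temporarily by making the effective uid equal to the real uid
 * while leaving the real uid alone (so the saved uid keeps the privileged
 * value, per the rules above), and later restore it from the saved uid.
 * The -1 argument means "leave unchanged".
 *
 *	#include <unistd.h>
 *
 *	static uid_t privileged_euid;
 *
 *	static int drop_priv_temporarily(void)
 *	{
 *		privileged_euid = geteuid();
 *		return setreuid(-1, getuid());
 *	}
 *
 *	static int restore_priv(void)
 *	{
 *		// Allowed because privileged_euid is still the saved uid.
 *		return setreuid(-1, privileged_euid);
 *	}
 */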
/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_IDs is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too. If you don't like this, blame the bright people
 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
long __sys_setuid(uid_t uid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kuid;

	kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable_setid(old->user_ns, CAP_SETUID)) {
		new->suid = new->uid = kuid;
		if (!uid_eq(kuid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
		goto error;
	}

	new->fsuid = new->euid = kuid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	retval = set_cred_ucounts(new);
	if (retval < 0)
		goto error;

	flag_nproc_exceeded(new);
	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	return __sys_setuid(uid);
}


/*
 * This function implements a generic ability to update ruid, euid,
 * and suid. This allows you to implement the 4.4 compatible seteuid().
 */
long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;
	bool ruid_new, euid_new, suid_new;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
	ksuid = make_kuid(ns, suid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;

	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	old = current_cred();

	/* check for no-op */
	if ((ruid == (uid_t) -1 || uid_eq(kruid, old->uid)) &&
	    (euid == (uid_t) -1 || (uid_eq(keuid, old->euid) &&
				    uid_eq(keuid, old->fsuid))) &&
	    (suid == (uid_t) -1 || uid_eq(ksuid, old->suid)))
		return 0;

	ruid_new = ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
		   !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid);
	euid_new = euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
		   !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid);
	suid_new = suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
		   !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid);
	if ((ruid_new || euid_new || suid_new) &&
	    !ns_capable_setid(old->user_ns, CAP_SETUID))
		return -EPERM;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = keuid;
	if (suid != (uid_t) -1)
		new->suid = ksuid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	retval = set_cred_ucounts(new);
	if (retval < 0)
		goto error;

	flag_nproc_exceeded(new);
	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	return __sys_setresuid(ruid, euid, suid);
}
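/*
 * Example (userspace sketch, illustrative only): setresuid() makes the
 * intent explicit. Writing the real uid into all three slots is the usual
 * way to drop privileges irreversibly, and getresuid() can verify that no
 * privileged id is left behind.
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *
 *	static int drop_priv_permanently(void)
 *	{
 *		uid_t r, e, s, real = getuid();
 *
 *		if (setresuid(real, real, real) != 0)
 *			return -1;
 *		if (getresuid(&r, &e, &s) != 0)
 *			return -1;
 *		return (r == real && e == real && s == real) ? 0 : -1;
 *	}
 */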
SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
	const struct cred *cred = current_cred();
	int retval;
	uid_t ruid, euid, suid;

	ruid = from_kuid_munged(cred->user_ns, cred->uid);
	euid = from_kuid_munged(cred->user_ns, cred->euid);
	suid = from_kuid_munged(cred->user_ns, cred->suid);

	retval = put_user(ruid, ruidp);
	if (!retval) {
		retval = put_user(euid, euidp);
		if (!retval)
			return put_user(suid, suidp);
	}
	return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;
	bool rgid_new, egid_new, sgid_new;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
	ksgid = make_kgid(ns, sgid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;

	old = current_cred();

	/* check for no-op */
	if ((rgid == (gid_t) -1 || gid_eq(krgid, old->gid)) &&
	    (egid == (gid_t) -1 || (gid_eq(kegid, old->egid) &&
				    gid_eq(kegid, old->fsgid))) &&
	    (sgid == (gid_t) -1 || gid_eq(ksgid, old->sgid)))
		return 0;

	rgid_new = rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
		   !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid);
	egid_new = egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
		   !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid);
	sgid_new = sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
		   !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid);
	if ((rgid_new || egid_new || sgid_new) &&
	    !ns_capable_setid(old->user_ns, CAP_SETGID))
		return -EPERM;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	if (rgid != (gid_t) -1)
		new->gid = krgid;
	if (egid != (gid_t) -1)
		new->egid = kegid;
	if (sgid != (gid_t) -1)
		new->sgid = ksgid;
	new->fsgid = new->egid;

	retval = security_task_fix_setgid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	return __sys_setresgid(rgid, egid, sgid);
}

SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
{
	const struct cred *cred = current_cred();
	int retval;
	gid_t rgid, egid, sgid;

	rgid = from_kgid_munged(cred->user_ns, cred->gid);
	egid = from_kgid_munged(cred->user_ns, cred->egid);
	sgid = from_kgid_munged(cred->user_ns, cred->sgid);

	retval = put_user(rgid, rgidp);
	if (!retval) {
		retval = put_user(egid, egidp);
		if (!retval)
			retval = put_user(sgid, sgidp);
	}

	return retval;
}
/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
long __sys_setfsuid(uid_t uid)
{
	const struct cred *old;
	struct cred *new;
	uid_t old_fsuid;
	kuid_t kuid;

	old = current_cred();
	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);

	kuid = make_kuid(old->user_ns, uid);
	if (!uid_valid(kuid))
		return old_fsuid;

	new = prepare_creds();
	if (!new)
		return old_fsuid;

	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid) ||
	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
	    ns_capable_setid(old->user_ns, CAP_SETUID)) {
		if (!uid_eq(kuid, old->fsuid)) {
			new->fsuid = kuid;
			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsuid;

change_okay:
	commit_creds(new);
	return old_fsuid;
}

SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
	return __sys_setfsuid(uid);
}

/*
 * Samma på svenska..
 */
long __sys_setfsgid(gid_t gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;
	kgid_t kgid;

	old = current_cred();
	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);

	kgid = make_kgid(old->user_ns, gid);
	if (!gid_valid(kgid))
		return old_fsgid;

	new = prepare_creds();
	if (!new)
		return old_fsgid;

	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid) ||
	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
	    ns_capable_setid(old->user_ns, CAP_SETGID)) {
		if (!gid_eq(kgid, old->fsgid)) {
			new->fsgid = kgid;
			if (security_task_fix_setgid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}

SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	return __sys_setfsgid(gid);
}
#endif /* CONFIG_MULTIUSER */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid. The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}
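/*
 * Example (userspace sketch, illustrative only): the tgid/pid distinction
 * drawn above is visible from userspace. In a multi-threaded program
 * getpid() returns the shared tgid while gettid() returns the per-thread
 * id; they only coincide in the thread group leader. glibc 2.30+ provides
 * the gettid() wrapper; older systems need syscall(SYS_gettid).
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		// Equal here: the initial thread is the thread group leader.
 *		printf("pid=%d tid=%d\n", getpid(), gettid());
 *		return 0;
 *	}
 */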
/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_egid());
}

static void do_sys_times(struct tms *tms)
{
	u64 tgutime, tgstime, cutime, cstime;

	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	tms->tms_utime = nsec_to_clock_t(tgutime);
	tms->tms_stime = nsec_to_clock_t(tgstime);
	tms->tms_cutime = nsec_to_clock_t(cutime);
	tms->tms_cstime = nsec_to_clock_t(cstime);
}

SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}

#ifdef CONFIG_COMPAT
static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
{
	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
}

COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tms;
		struct compat_tms tmp;

		do_sys_times(&tms);
		/* Convert our struct tms to the compat version. */
		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return compat_jiffies_to_clock_t(jiffies);
}
#endif
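/*
 * Example (userspace sketch, illustrative only): both the return value of
 * times() and the struct tms fields are in clock ticks, so userspace
 * divides by sysconf(_SC_CLK_TCK) to convert to seconds.
 *
 *	#include <stdio.h>
 *	#include <sys/times.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct tms t;
 *		clock_t ticks = times(&t);
 *		long hz = sysconf(_SC_CLK_TCK);
 *
 *		if (ticks == (clock_t)-1 || hz <= 0)
 *			return 1;
 *		printf("user %.2fs sys %.2fs\n",
 *		       (double)t.tms_utime / hz, (double)t.tms_stime / hz);
 *		return 0;
 *	}
 */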
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another. -TYT, 12/12/91
 *
 * !PF_FORKNOEXEC check to conform completely to POSIX.
 */
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;
	rcu_read_lock();

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (!(p->flags & PF_FORKNOEXEC))
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp)
		change_pid(p, PIDTYPE_PGID, pgrp);

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();
	return err;
}

static int do_getpgid(pid_t pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}

SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
	return do_getpgid(pid);
}

#ifdef __ARCH_WANT_SYS_GETPGRP

SYSCALL_DEFINE0(getpgrp)
{
	return do_getpgid(0);
}

#endif

SYSCALL_DEFINE1(getsid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}
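/*
 * Example (userspace sketch, illustrative only): the usual shell-style use
 * of setpgid() is to move a freshly forked child into its own process
 * group so job-control signals can target the whole group. Both parent
 * and child typically make the call to close the startup race.
 *
 *	#include <unistd.h>
 *
 *	static pid_t spawn_job(void)
 *	{
 *		pid_t child = fork();
 *
 *		if (child == 0) {
 *			setpgid(0, 0);		// child: new group, pgid == its pid
 *			// ... exec the job here ...
 *			_exit(127);
 *		}
 *		if (child > 0)
 *			setpgid(child, child);	// parent: same call, ignore failure
 *		return child;
 *	}
 */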
static void set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;

	if (task_session(curr) != pid)
		change_pid(curr, PIDTYPE_SID, pid);

	if (task_pgrp(curr) != pid)
		change_pid(curr, PIDTYPE_PGID, pid);
}

int ksys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	set_special_pids(sid);

	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	if (err > 0) {
		proc_sid_connector(group_leader);
		sched_autogroup_create_attach(group_leader);
	}
	return err;
}

SYSCALL_DEFINE0(setsid)
{
	return ksys_setsid();
}

DECLARE_RWSEM(uts_sem);

#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif

/*
 * Work around broken programs that cannot handle "Linux 3.0".
 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40.
 * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be
 * 2.6.60.
 */
static int override_release(char __user *release, size_t len)
{
	int ret = 0;

	if (current->personality & UNAME26) {
		const char *rest = UTS_RELEASE;
		char buf[65] = { 0 };
		int ndots = 0;
		unsigned v;
		size_t copy;

		while (*rest) {
			if (*rest == '.' && ++ndots >= 3)
				break;
			if (!isdigit(*rest) && *rest != '.')
				break;
			rest++;
		}
		v = LINUX_VERSION_PATCHLEVEL + 60;
		copy = clamp_t(size_t, len, 1, sizeof(buf));
		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
		ret = copy_to_user(release, buf, copy + 1);
	}
	return ret;
}

SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
	struct new_utsname tmp;

	down_read(&uts_sem);
	memcpy(&tmp, utsname(), sizeof(tmp));
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	if (override_architecture(name))
		return -EFAULT;
	return 0;
}

#ifdef __ARCH_WANT_SYS_OLD_UNAME
/*
 * Old cruft
 */
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
	struct old_utsname tmp;

	if (!name)
		return -EFAULT;

	down_read(&uts_sem);
	memcpy(&tmp, utsname(), sizeof(tmp));
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	if (override_architecture(name))
		return -EFAULT;
	return 0;
}

SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
	struct oldold_utsname tmp;

	if (!name)
		return -EFAULT;

	memset(&tmp, 0, sizeof(tmp));

	down_read(&uts_sem);
	memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
	memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
	memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
	memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
	memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_architecture(name))
		return -EFAULT;
	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	return 0;
}
#endif
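/*
 * Example (userspace sketch, illustrative only): the UNAME26 mangling
 * above only affects processes that opted in via personality(); a normal
 * uname() caller sees the real release string.
 *
 *	#include <stdio.h>
 *	#include <sys/utsname.h>
 *
 *	int main(void)
 *	{
 *		struct utsname u;
 *
 *		if (uname(&u) != 0)
 *			return 1;
 *		printf("%s %s %s\n", u.sysname, u.release, u.machine);
 *		return 0;
 *	}
 */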
SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u;

		add_device_randomness(tmp, len);
		down_write(&uts_sem);
		u = utsname();
		memcpy(u->nodename, tmp, len);
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_HOSTNAME);
		up_write(&uts_sem);
	}
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
	int i;
	struct new_utsname *u;
	char tmp[__NEW_UTS_LEN + 1];

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	memcpy(tmp, u->nodename, i);
	up_read(&uts_sem);
	if (copy_to_user(name, tmp, i))
		return -EFAULT;
	return 0;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u;

		add_device_randomness(tmp, len);
		down_write(&uts_sem);
		u = utsname();
		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_DOMAINNAME);
		up_write(&uts_sem);
	}
	return errno;
}
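/*
 * Example (userspace sketch, illustrative only): sethostname() requires
 * CAP_SYS_ADMIN in the user namespace owning the caller's UTS namespace,
 * so this only succeeds for a privileged caller or inside an unshared UTS
 * namespace that the caller owns.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *name = "builder";
 *
 *		if (sethostname(name, strlen(name)) != 0) {
 *			perror("sethostname");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */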
/* make sure you are allowed to change @tsk limits before calling this */
static int do_prlimit(struct task_struct *tsk, unsigned int resource,
		      struct rlimit *new_rlim, struct rlimit *old_rlim)
{
	struct rlimit *rlim;
	int retval = 0;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	resource = array_index_nospec(resource, RLIM_NLIMITS);

	if (new_rlim) {
		if (new_rlim->rlim_cur > new_rlim->rlim_max)
			return -EINVAL;
		if (resource == RLIMIT_NOFILE &&
		    new_rlim->rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	/* Holding a refcount on tsk protects tsk->signal from disappearing. */
	rlim = tsk->signal->rlim + resource;
	task_lock(tsk->group_leader);
	if (new_rlim) {
		/*
		 * Keep the capable check against init_user_ns until cgroups can
		 * contain all limits.
		 */
		if (new_rlim->rlim_max > rlim->rlim_max &&
		    !capable(CAP_SYS_RESOURCE))
			retval = -EPERM;
		if (!retval)
			retval = security_task_setrlimit(tsk, resource, new_rlim);
	}
	if (!retval) {
		if (old_rlim)
			*old_rlim = *rlim;
		if (new_rlim)
			*rlim = *new_rlim;
	}
	task_unlock(tsk->group_leader);

	/*
	 * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not
	 * infinite. In case of RLIM_INFINITY the posix CPU timer code
	 * ignores the rlimit.
	 */
	if (!retval && new_rlim && resource == RLIMIT_CPU &&
	    new_rlim->rlim_cur != RLIM_INFINITY &&
	    IS_ENABLED(CONFIG_POSIX_TIMERS)) {
		/*
		 * update_rlimit_cpu can fail if the task is exiting, but there
		 * may be other tasks in the thread group that are not exiting,
		 * and they need their cpu timers adjusted.
		 *
		 * The group_leader is the last task to be released, so if we
		 * cannot update_rlimit_cpu on it, then the entire process is
		 * exiting and we do not need to update at all.
		 */
		update_rlimit_cpu(tsk->group_leader, new_rlim->rlim_cur);
	}

	return retval;
}

SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit value;
	int ret;

	ret = do_prlimit(current, resource, NULL, &value);
	if (!ret)
		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

	return ret;
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	struct compat_rlimit r32;

	if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
		return -EFAULT;

	if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
		r.rlim_cur = RLIM_INFINITY;
	else
		r.rlim_cur = r32.rlim_cur;
	if (r32.rlim_max == COMPAT_RLIM_INFINITY)
		r.rlim_max = RLIM_INFINITY;
	else
		r.rlim_max = r32.rlim_max;
	return do_prlimit(current, resource, &r, NULL);
}

COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	int ret;

	ret = do_prlimit(current, resource, NULL, &r);
	if (!ret) {
		struct compat_rlimit r32;
		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
			r32.rlim_cur = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_cur = r.rlim_cur;
		if (r.rlim_max > COMPAT_RLIM_INFINITY)
			r32.rlim_max = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_max = r.rlim_max;

		if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
			return -EFAULT;
	}
	return ret;
}

#endif
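/*
 * Example (userspace sketch, illustrative only): raising the soft limit
 * for open files up to the current hard limit needs no privilege; raising
 * the hard limit itself would require CAP_SYS_RESOURCE, as checked in
 * do_prlimit() above.
 *
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		struct rlimit rl;
 *
 *		if (getrlimit(RLIMIT_NOFILE, &rl) != 0)
 *			return 1;
 *		rl.rlim_cur = rl.rlim_max;
 *		if (setrlimit(RLIMIT_NOFILE, &rl) != 0)
 *			perror("setrlimit");
 *		printf("RLIMIT_NOFILE: %llu/%llu\n",
 *		       (unsigned long long)rl.rlim_cur,
 *		       (unsigned long long)rl.rlim_max);
 *		return 0;
 *	}
 */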
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit. Needed for some apps.
 */
SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		struct rlimit __user *, rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	resource = array_index_nospec(resource, RLIM_NLIMITS);
	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	resource = array_index_nospec(resource, RLIM_NLIMITS);
	task_lock(current->group_leader);
	r = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (r.rlim_cur > 0x7FFFFFFF)
		r.rlim_cur = 0x7FFFFFFF;
	if (r.rlim_max > 0x7FFFFFFF)
		r.rlim_max = 0x7FFFFFFF;

	if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
	    put_user(r.rlim_max, &rlim->rlim_max))
		return -EFAULT;
	return 0;
}
#endif

#endif

static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
	return rlim64 >= ULONG_MAX;
#else
	return rlim64 == RLIM64_INFINITY;
#endif
}

static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
{
	if (rlim->rlim_cur == RLIM_INFINITY)
		rlim64->rlim_cur = RLIM64_INFINITY;
	else
		rlim64->rlim_cur = rlim->rlim_cur;
	if (rlim->rlim_max == RLIM_INFINITY)
		rlim64->rlim_max = RLIM64_INFINITY;
	else
		rlim64->rlim_max = rlim->rlim_max;
}

static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
{
	if (rlim64_is_infinity(rlim64->rlim_cur))
		rlim->rlim_cur = RLIM_INFINITY;
	else
		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
	if (rlim64_is_infinity(rlim64->rlim_max))
		rlim->rlim_max = RLIM_INFINITY;
	else
		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
}

/* rcu lock must be held */
static int check_prlimit_permission(struct task_struct *task,
				    unsigned int flags)
{
	const struct cred *cred = current_cred(), *tcred;
	bool id_match;

	if (current == task)
		return 0;

	tcred = __task_cred(task);
	id_match = (uid_eq(cred->uid, tcred->euid) &&
		    uid_eq(cred->uid, tcred->suid) &&
		    uid_eq(cred->uid, tcred->uid)  &&
		    gid_eq(cred->gid, tcred->egid) &&
		    gid_eq(cred->gid, tcred->sgid) &&
		    gid_eq(cred->gid, tcred->gid));
	if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
		return -EPERM;

	return security_task_prlimit(cred, tcred, flags);
}
SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
		const struct rlimit64 __user *, new_rlim,
		struct rlimit64 __user *, old_rlim)
{
	struct rlimit64 old64, new64;
	struct rlimit old, new;
	struct task_struct *tsk;
	unsigned int checkflags = 0;
	int ret;

	if (old_rlim)
		checkflags |= LSM_PRLIMIT_READ;

	if (new_rlim) {
		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
			return -EFAULT;
		rlim64_to_rlim(&new64, &new);
		checkflags |= LSM_PRLIMIT_WRITE;
	}

	rcu_read_lock();
	tsk = pid ? find_task_by_vpid(pid) : current;
	if (!tsk) {
		rcu_read_unlock();
		return -ESRCH;
	}
	ret = check_prlimit_permission(tsk, checkflags);
	if (ret) {
		rcu_read_unlock();
		return ret;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
			old_rlim ? &old : NULL);

	if (!ret && old_rlim) {
		rlim_to_rlim64(&old, &old64);
		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
			ret = -EFAULT;
	}

	put_task_struct(tsk);
	return ret;
}

SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit new_rlim;

	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	return do_prlimit(current, resource, &new_rlim, NULL);
}
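/*
 * Example (userspace sketch, illustrative only): glibc exposes the
 * prlimit64 syscall as prlimit(). Passing NULL for the new limit makes it
 * a pure read, which only needs the LSM_PRLIMIT_READ permission checked
 * above.
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	static int show_nofile(pid_t pid)
 *	{
 *		struct rlimit rl;
 *
 *		if (prlimit(pid, RLIMIT_NOFILE, NULL, &rl) != 0)
 *			return -1;
 *		printf("pid %d RLIMIT_NOFILE cur=%llu max=%llu\n", (int)pid,
 *		       (unsigned long long)rl.rlim_cur,
 *		       (unsigned long long)rl.rlim_max);
 *		return 0;
 *	}
 */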
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*. After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this. It will make moving the rest of the information
 * a lot simpler! (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters. But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums. We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded
 * non-current multithreaded. Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, write memory barrier is implied in __exit_signal
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 *
 */

static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);
}

void getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	u64 tgutime, tgstime, utime, stime;
	unsigned long maxrss;
	struct mm_struct *mm;
	struct signal_struct *sig = p->signal;
	unsigned int seq = 0;

retry:
	memset(r, 0, sizeof(*r));
	utime = stime = 0;
	maxrss = 0;

	if (who == RUSAGE_THREAD) {
		task_cputime_adjusted(current, &utime, &stime);
		accumulate_thread_rusage(p, r);
		maxrss = sig->maxrss;
		goto out_thread;
	}

	flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = sig->cutime;
		stime = sig->cstime;
		r->ru_nvcsw = sig->cnvcsw;
		r->ru_nivcsw = sig->cnivcsw;
		r->ru_minflt = sig->cmin_flt;
		r->ru_majflt = sig->cmaj_flt;
		r->ru_inblock = sig->cinblock;
		r->ru_oublock = sig->coublock;
		maxrss = sig->cmaxrss;

		if (who == RUSAGE_CHILDREN)
			break;
		fallthrough;

	case RUSAGE_SELF:
		r->ru_nvcsw += sig->nvcsw;
		r->ru_nivcsw += sig->nivcsw;
		r->ru_minflt += sig->min_flt;
		r->ru_majflt += sig->maj_flt;
		r->ru_inblock += sig->inblock;
		r->ru_oublock += sig->oublock;
		if (maxrss < sig->maxrss)
			maxrss = sig->maxrss;

		rcu_read_lock();
		__for_each_thread(sig, t)
			accumulate_thread_rusage(t, r);
		rcu_read_unlock();

		break;

	default:
		BUG();
	}

	if (need_seqretry(&sig->stats_lock, seq)) {
		seq = 1;
		goto retry;
	}
	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);

	if (who == RUSAGE_CHILDREN)
		goto out_children;

	thread_group_cputime_adjusted(p, &tgutime, &tgstime);
	utime += tgutime;
	stime += tgstime;

out_thread:
	mm = get_task_mm(p);
	if (mm) {
		setmax_mm_hiwater_rss(&maxrss, mm);
		mmput(mm);
	}

out_children:
	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
	r->ru_utime = ns_to_kernel_old_timeval(utime);
	r->ru_stime = ns_to_kernel_old_timeval(stime);
}
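/*
 * Example (userspace sketch, illustrative only): as in the kernel helper
 * above, ru_maxrss is reported in kilobytes, and ru_utime/ru_stime arrive
 * already split into seconds and microseconds.
 *
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		struct rusage ru;
 *
 *		if (getrusage(RUSAGE_SELF, &ru) != 0)
 *			return 1;
 *		printf("maxrss=%ld kB user=%ld.%06lds\n", ru.ru_maxrss,
 *		       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec);
 *		return 0;
 *	}
 */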
SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return put_compat_rusage(&r, ru);
}
#endif

SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
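/*
 * Example (userspace sketch, illustrative only): umask() always succeeds
 * and returns the previous mask, so reading the current value without
 * changing it takes two calls.
 *
 *	#include <sys/stat.h>
 *
 *	static mode_t query_umask(void)
 *	{
 *		mode_t old = umask(0);
 *
 *		umask(old);		// put the original mask back
 *		return old;
 *	}
 */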
static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
	CLASS(fd, exe)(fd);
	struct inode *inode;
	int err;

	if (fd_empty(exe))
		return -EBADF;

	inode = file_inode(fd_file(exe));

	/*
	 * Because the original mm->exe_file points to executable file, make
	 * sure that this one is executable as well, to avoid breaking an
	 * overall picture.
	 */
	if (!S_ISREG(inode->i_mode) || path_noexec(&fd_file(exe)->f_path))
		return -EACCES;

	err = file_permission(fd_file(exe), MAY_EXEC);
	if (err)
		return err;

	return replace_mm_exe_file(mm, fd_file(exe));
}

/*
 * Check arithmetic relations of passed addresses.
 *
 * WARNING: we don't require any capability here so be very careful
 * in what is allowed for modification from userspace.
 */
static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
{
	unsigned long mmap_max_addr = TASK_SIZE;
	int error = -EINVAL, i;

	static const unsigned char offsets[] = {
		offsetof(struct prctl_mm_map, start_code),
		offsetof(struct prctl_mm_map, end_code),
		offsetof(struct prctl_mm_map, start_data),
		offsetof(struct prctl_mm_map, end_data),
		offsetof(struct prctl_mm_map, start_brk),
		offsetof(struct prctl_mm_map, brk),
		offsetof(struct prctl_mm_map, start_stack),
		offsetof(struct prctl_mm_map, arg_start),
		offsetof(struct prctl_mm_map, arg_end),
		offsetof(struct prctl_mm_map, env_start),
		offsetof(struct prctl_mm_map, env_end),
	};

	/*
	 * Make sure the members are not somewhere outside
	 * of allowed address space.
	 */
	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);

		if ((unsigned long)val >= mmap_max_addr ||
		    (unsigned long)val < mmap_min_addr)
			goto out;
	}

	/*
	 * Make sure the pairs are ordered.
	 */
#define __prctl_check_order(__m1, __op, __m2)				\
	((unsigned long)prctl_map->__m1 __op				\
	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
	error  = __prctl_check_order(start_code, <, end_code);
	error |= __prctl_check_order(start_data,<=, end_data);
	error |= __prctl_check_order(start_brk, <=, brk);
	error |= __prctl_check_order(arg_start, <=, arg_end);
	error |= __prctl_check_order(env_start, <=, env_end);
	if (error)
		goto out;
#undef __prctl_check_order

	error = -EINVAL;

	/*
	 * Nor should we allow overriding the limits if they are set.
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
			      prctl_map->start_brk, prctl_map->end_data,
			      prctl_map->start_data))
		goto out;

	error = 0;
out:
	return error;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
{
	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
	unsigned long user_auxv[AT_VECTOR_SIZE];
	struct mm_struct *mm = current->mm;
	int error;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);

	if (opt == PR_SET_MM_MAP_SIZE)
		return put_user((unsigned int)sizeof(prctl_map),
				(unsigned int __user *)addr);

	if (data_size != sizeof(prctl_map))
		return -EINVAL;

	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
		return -EFAULT;

	error = validate_prctl_map_addr(&prctl_map);
	if (error)
		return error;

	if (prctl_map.auxv_size) {
		/*
		 * Someone is trying to cheat the auxv vector.
		 */
		if (!prctl_map.auxv ||
				prctl_map.auxv_size > sizeof(mm->saved_auxv))
			return -EINVAL;

		memset(user_auxv, 0, sizeof(user_auxv));
		if (copy_from_user(user_auxv,
				   (const void __user *)prctl_map.auxv,
				   prctl_map.auxv_size))
			return -EFAULT;

		/* Last entry must be AT_NULL as specification requires */
		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
	}

	if (prctl_map.exe_fd != (u32)-1) {
		/*
		 * Check if the current user is checkpoint/restore capable.
		 * At the time of this writing, it checks for CAP_SYS_ADMIN
		 * or CAP_CHECKPOINT_RESTORE.
		 * Note that a user with access to ptrace can masquerade an
		 * arbitrary program as any executable, even setuid ones.
		 * This may have implications in the tomoyo subsystem.
		 */
		if (!checkpoint_restore_ns_capable(current_user_ns()))
			return -EPERM;

		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
		if (error)
			return error;
	}

	/*
	 * arg_lock protects concurrent updates but we still need mmap_lock for
	 * read to exclude races with sys_brk.
	 */
	mmap_read_lock(mm);

	/*
	 * We don't validate if these members are pointing to
	 * real present VMAs because application may have correspond
	 * VMAs already unmapped and kernel uses these members for statistics
	 * output in procfs mostly, except
	 *
	 *  - @start_brk/@brk which are used in do_brk_flags but kernel lookups
	 *    for VMAs when updating these members so anything wrong written
	 *    here cause kernel to swear at userspace program but won't lead
	 *    to any problem in kernel itself
	 */

	spin_lock(&mm->arg_lock);
	mm->start_code	= prctl_map.start_code;
	mm->end_code	= prctl_map.end_code;
	mm->start_data	= prctl_map.start_data;
	mm->end_data	= prctl_map.end_data;
	mm->start_brk	= prctl_map.start_brk;
	mm->brk		= prctl_map.brk;
	mm->start_stack	= prctl_map.start_stack;
	mm->arg_start	= prctl_map.arg_start;
	mm->arg_end	= prctl_map.arg_end;
	mm->env_start	= prctl_map.env_start;
	mm->env_end	= prctl_map.env_end;
	spin_unlock(&mm->arg_lock);

	/*
	 * Note this update of @saved_auxv is lockless thus
	 * if someone reads this member in procfs while we're
	 * updating -- it may get partly updated results. It's
	 * known and acceptable trade off: we leave it as is to
	 * not introduce additional locks here making the kernel
	 * more complex.
	 */
	if (prctl_map.auxv_size)
		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));

	mmap_read_unlock(mm);
	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
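/*
 * Example (userspace sketch, illustrative only): PR_SET_MM_MAP_SIZE lets
 * checkpoint/restore tools ask how large a struct prctl_mm_map the running
 * kernel expects before attempting PR_SET_MM_MAP. It only works when the
 * kernel was built with CONFIG_CHECKPOINT_RESTORE.
 *
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *
 *	int main(void)
 *	{
 *		unsigned int size = 0;
 *
 *		if (prctl(PR_SET_MM, PR_SET_MM_MAP_SIZE,
 *			  (unsigned long)&size, 0, 0) != 0) {
 *			perror("prctl");
 *			return 1;
 *		}
 *		printf("struct prctl_mm_map is %u bytes\n", size);
 *		return 0;
 *	}
 */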

static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
			  unsigned long len)
{
	/*
	 * This doesn't move the auxiliary vector itself since it's pinned to
	 * mm_struct, but it permits filling the vector with new values. It's
	 * up to the caller to provide sane values here, otherwise userspace
	 * tools which use this vector might be unhappy.
	 */
	unsigned long user_auxv[AT_VECTOR_SIZE] = {};

	if (len > sizeof(user_auxv))
		return -EINVAL;

	if (copy_from_user(user_auxv, (const void __user *)addr, len))
		return -EFAULT;

	/* Make sure the last entry is always AT_NULL */
	user_auxv[AT_VECTOR_SIZE - 2] = 0;
	user_auxv[AT_VECTOR_SIZE - 1] = 0;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));

	task_lock(current);
	memcpy(mm->saved_auxv, user_auxv, len);
	task_unlock(current);

	return 0;
}
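
/*
 * Illustrative (non-normative) userspace sketch of rewriting the recorded
 * auxv through the helper above. PR_SET_MM_AUXV requires CAP_SYS_RESOURCE;
 * the buffer is assumed to come from the caller's own /proc/self/auxv
 * (auxv_fd below is a hypothetical fd opened on that file) and to fit
 * within the kernel's saved_auxv:
 *
 *	unsigned long auxv[64];
 *	ssize_t len = read(auxv_fd, auxv, sizeof(auxv));
 *
 *	if (len > 0 &&
 *	    prctl(PR_SET_MM, PR_SET_MM_AUXV, (unsigned long)auxv, len, 0))
 *		perror("PR_SET_MM_AUXV");
 */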

static int prctl_set_mm(int opt, unsigned long addr,
			unsigned long arg4, unsigned long arg5)
{
	struct mm_struct *mm = current->mm;
	struct prctl_mm_map prctl_map = {
		.auxv = NULL,
		.auxv_size = 0,
		.exe_fd = -1,
	};
	struct vm_area_struct *vma;
	int error;

	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
			      opt != PR_SET_MM_MAP &&
			      opt != PR_SET_MM_MAP_SIZE)))
		return -EINVAL;

#ifdef CONFIG_CHECKPOINT_RESTORE
	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
#endif

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	if (opt == PR_SET_MM_EXE_FILE)
		return prctl_set_mm_exe_file(mm, (unsigned int)addr);

	if (opt == PR_SET_MM_AUXV)
		return prctl_set_auxv(mm, addr, arg4);

	if (addr >= TASK_SIZE || addr < mmap_min_addr)
		return -EINVAL;

	error = -EINVAL;

	/*
	 * arg_lock protects concurrent updates of the arg boundaries; we
	 * still need mmap_lock for a) concurrent sys_brk and b) finding the
	 * VMA for addr validation.
	 */
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);

	spin_lock(&mm->arg_lock);
	prctl_map.start_code = mm->start_code;
	prctl_map.end_code = mm->end_code;
	prctl_map.start_data = mm->start_data;
	prctl_map.end_data = mm->end_data;
	prctl_map.start_brk = mm->start_brk;
	prctl_map.brk = mm->brk;
	prctl_map.start_stack = mm->start_stack;
	prctl_map.arg_start = mm->arg_start;
	prctl_map.arg_end = mm->arg_end;
	prctl_map.env_start = mm->env_start;
	prctl_map.env_end = mm->env_end;

	switch (opt) {
	case PR_SET_MM_START_CODE:
		prctl_map.start_code = addr;
		break;
	case PR_SET_MM_END_CODE:
		prctl_map.end_code = addr;
		break;
	case PR_SET_MM_START_DATA:
		prctl_map.start_data = addr;
		break;
	case PR_SET_MM_END_DATA:
		prctl_map.end_data = addr;
		break;
	case PR_SET_MM_START_STACK:
		prctl_map.start_stack = addr;
		break;
	case PR_SET_MM_START_BRK:
		prctl_map.start_brk = addr;
		break;
	case PR_SET_MM_BRK:
		prctl_map.brk = addr;
		break;
	case PR_SET_MM_ARG_START:
		prctl_map.arg_start = addr;
		break;
	case PR_SET_MM_ARG_END:
		prctl_map.arg_end = addr;
		break;
	case PR_SET_MM_ENV_START:
		prctl_map.env_start = addr;
		break;
	case PR_SET_MM_ENV_END:
		prctl_map.env_end = addr;
		break;
	default:
		goto out;
	}

	error = validate_prctl_map_addr(&prctl_map);
	if (error)
		goto out;

	switch (opt) {
	/*
	 * If the command line arguments and environment are placed
	 * somewhere else on the stack, we can set them up here:
	 * ARG_START/END for the command line arguments and
	 * ENV_START/END for the environment.
	 */
	case PR_SET_MM_START_STACK:
	case PR_SET_MM_ARG_START:
	case PR_SET_MM_ARG_END:
	case PR_SET_MM_ENV_START:
	case PR_SET_MM_ENV_END:
		if (!vma) {
			error = -EFAULT;
			goto out;
		}
	}

	mm->start_code = prctl_map.start_code;
	mm->end_code = prctl_map.end_code;
	mm->start_data = prctl_map.start_data;
	mm->end_data = prctl_map.end_data;
	mm->start_brk = prctl_map.start_brk;
	mm->brk = prctl_map.brk;
	mm->start_stack = prctl_map.start_stack;
	mm->arg_start = prctl_map.arg_start;
	mm->arg_end = prctl_map.arg_end;
	mm->env_start = prctl_map.env_start;
	mm->env_end = prctl_map.env_end;

	error = 0;
out:
	spin_unlock(&mm->arg_lock);
	mmap_read_unlock(mm);
	return error;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
{
	return put_user(me->clear_child_tid, tid_addr);
}
#else
static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
{
	return -EINVAL;
}
#endif
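
/*
 * Illustrative (non-normative) userspace sketch of querying the address
 * previously installed via set_tid_address()/CLONE_CHILD_CLEARTID using
 * the helper above (only available with CONFIG_CHECKPOINT_RESTORE; assumes
 * <sys/prctl.h> and <linux/prctl.h> in userspace):
 *
 *	int *tid_addr = NULL;
 *
 *	if (prctl(PR_GET_TID_ADDRESS, (unsigned long)&tid_addr, 0, 0, 0) == 0)
 *		printf("clear_child_tid at %p\n", (void *)tid_addr);
 */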

static int propagate_has_child_subreaper(struct task_struct *p, void *data)
{
	/*
	 * If the task already has has_child_subreaper set, all its
	 * descendants already have the flag too and new descendants will
	 * inherit it on fork, so skip them.
	 *
	 * If we've found a child_reaper, skip the descendants in its
	 * subtree as they will never get out of that pidns.
	 */
	if (p->signal->has_child_subreaper ||
	    is_child_reaper(task_pid(p)))
		return 0;

	p->signal->has_child_subreaper = 1;
	return 1;
}

int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
{
	return -EINVAL;
}

int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
				    unsigned long ctrl)
{
	return -EINVAL;
}

int __weak arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status)
{
	return -EINVAL;
}

int __weak arch_set_shadow_stack_status(struct task_struct *t, unsigned long status)
{
	return -EINVAL;
}

int __weak arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status)
{
	return -EINVAL;
}

#define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE)

#ifdef CONFIG_ANON_VMA_NAME

#define ANON_VMA_NAME_MAX_LEN		80
#define ANON_VMA_NAME_INVALID_CHARS	"\\`$[]"

static inline bool is_valid_name_char(char ch)
{
	/* printable ascii characters, excluding ANON_VMA_NAME_INVALID_CHARS */
	return ch > 0x1f && ch < 0x7f &&
		!strchr(ANON_VMA_NAME_INVALID_CHARS, ch);
}

static int prctl_set_vma(unsigned long opt, unsigned long addr,
			 unsigned long size, unsigned long arg)
{
	struct mm_struct *mm = current->mm;
	const char __user *uname;
	struct anon_vma_name *anon_name = NULL;
	int error;

	switch (opt) {
	case PR_SET_VMA_ANON_NAME:
		uname = (const char __user *)arg;
		if (uname) {
			char *name, *pch;

			name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN);
			if (IS_ERR(name))
				return PTR_ERR(name);

			for (pch = name; *pch != '\0'; pch++) {
				if (!is_valid_name_char(*pch)) {
					kfree(name);
					return -EINVAL;
				}
			}
			/* anon_vma has its own copy */
			anon_name = anon_vma_name_alloc(name);
			kfree(name);
			if (!anon_name)
				return -ENOMEM;

		}

		mmap_write_lock(mm);
		error = madvise_set_anon_name(mm, addr, size, anon_name);
		mmap_write_unlock(mm);
		anon_vma_name_put(anon_name);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}

#else /* CONFIG_ANON_VMA_NAME */
static int prctl_set_vma(unsigned long opt, unsigned long start,
			 unsigned long size, unsigned long arg)
{
	return -EINVAL;
}
#endif /* CONFIG_ANON_VMA_NAME */

static inline unsigned long get_current_mdwe(void)
{
	unsigned long ret = 0;

	if (test_bit(MMF_HAS_MDWE, &current->mm->flags))
		ret |= PR_MDWE_REFUSE_EXEC_GAIN;
	if (test_bit(MMF_HAS_MDWE_NO_INHERIT, &current->mm->flags))
		ret |= PR_MDWE_NO_INHERIT;

	return ret;
}
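
/*
 * Illustrative (non-normative) userspace sketch of opting a process into
 * memory-deny-write-execute before it runs untrusted code; per the setter
 * below, once the bits are set they cannot be cleared again for this mm
 * (assumes <sys/prctl.h> and <linux/prctl.h> in userspace):
 *
 *	if (prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0, 0, 0))
 *		perror("PR_SET_MDWE");
 *	else if (prctl(PR_GET_MDWE, 0, 0, 0, 0) != PR_MDWE_REFUSE_EXEC_GAIN)
 *		fprintf(stderr, "unexpected MDWE state\n");
 */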

static inline int prctl_set_mdwe(unsigned long bits, unsigned long arg3,
				 unsigned long arg4, unsigned long arg5)
{
	unsigned long current_bits;

	if (arg3 || arg4 || arg5)
		return -EINVAL;

	if (bits & ~(PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT))
		return -EINVAL;

	/* NO_INHERIT only makes sense with REFUSE_EXEC_GAIN */
	if (bits & PR_MDWE_NO_INHERIT && !(bits & PR_MDWE_REFUSE_EXEC_GAIN))
		return -EINVAL;

	/*
	 * EOPNOTSUPP might be more appropriate here in principle, but
	 * existing userspace depends on EINVAL specifically.
	 */
	if (!arch_memory_deny_write_exec_supported())
		return -EINVAL;

	current_bits = get_current_mdwe();
	if (current_bits && current_bits != bits)
		return -EPERM; /* Cannot unset the flags */

	if (bits & PR_MDWE_NO_INHERIT)
		set_bit(MMF_HAS_MDWE_NO_INHERIT, &current->mm->flags);
	if (bits & PR_MDWE_REFUSE_EXEC_GAIN)
		set_bit(MMF_HAS_MDWE, &current->mm->flags);

	return 0;
}

static inline int prctl_get_mdwe(unsigned long arg2, unsigned long arg3,
				 unsigned long arg4, unsigned long arg5)
{
	if (arg2 || arg3 || arg4 || arg5)
		return -EINVAL;
	return get_current_mdwe();
}

static int prctl_get_auxv(void __user *addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	unsigned long size = min_t(unsigned long, sizeof(mm->saved_auxv), len);

	if (size && copy_to_user(addr, mm->saved_auxv, size))
		return -EFAULT;
	return sizeof(mm->saved_auxv);
}
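
/*
 * Illustrative (non-normative) userspace sketch of reading the saved auxv
 * through the helper above. Note the return value is always the full size
 * of the kernel's copy, even when fewer bytes were requested (assumes
 * <sys/prctl.h> and <linux/prctl.h> in userspace):
 *
 *	unsigned long auxv[64] = { 0 };
 *	int full = prctl(PR_GET_AUXV, (unsigned long)auxv, sizeof(auxv), 0, 0);
 *
 *	if (full < 0)
 *		perror("PR_GET_AUXV");
 */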

SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)
		return error;

	error = 0;
	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;
	case PR_SET_NAME:
		comm[sizeof(me->comm) - 1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		proc_comm_connector(me);
		break;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
			return -EFAULT;
		break;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2, (char __user *)arg3);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_TASK_PERF_EVENTS_DISABLE:
		error = perf_event_task_disable();
		break;
	case PR_TASK_PERF_EVENTS_ENABLE:
		error = perf_event_task_enable();
		break;
	case PR_GET_TIMERSLACK:
		if (current->timer_slack_ns > ULONG_MAX)
			error = ULONG_MAX;
		else
			error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (rt_or_dl_task_policy(current))
			break;
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;
	case PR_MCE_KILL:
		if (arg4 | arg5)
			return -EINVAL;
		switch (arg2) {
		case PR_MCE_KILL_CLEAR:
			if (arg3 != 0)
				return -EINVAL;
			current->flags &= ~PF_MCE_PROCESS;
			break;
		case PR_MCE_KILL_SET:
			current->flags |= PF_MCE_PROCESS;
			if (arg3 == PR_MCE_KILL_EARLY)
				current->flags |= PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_LATE)
				current->flags &= ~PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_DEFAULT)
				current->flags &=
						~(PF_MCE_EARLY|PF_MCE_PROCESS);
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_MCE_KILL_GET:
		if (arg2 | arg3 | arg4 | arg5)
			return -EINVAL;
		if (current->flags & PF_MCE_PROCESS)
			error = (current->flags & PF_MCE_EARLY) ?
				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
		else
			error = PR_MCE_KILL_DEFAULT;
		break;
	case PR_SET_MM:
		error = prctl_set_mm(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_TID_ADDRESS:
		error = prctl_get_tid_address(me, (int __user * __user *)arg2);
		break;
	case PR_SET_CHILD_SUBREAPER:
		me->signal->is_child_subreaper = !!arg2;
		if (!arg2)
			break;

		walk_process_tree(me, propagate_has_child_subreaper, NULL);
		break;
	case PR_GET_CHILD_SUBREAPER:
		error = put_user(me->signal->is_child_subreaper,
				 (int __user *)arg2);
		break;
	case PR_SET_NO_NEW_PRIVS:
		if (arg2 != 1 || arg3 || arg4 || arg5)
			return -EINVAL;

		task_set_no_new_privs(current);
		break;
	case PR_GET_NO_NEW_PRIVS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		return task_no_new_privs(current) ? 1 : 0;
	case PR_GET_THP_DISABLE:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
		break;
	case PR_SET_THP_DISABLE:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		if (mmap_write_lock_killable(me->mm))
			return -EINTR;
		if (arg2)
			set_bit(MMF_DISABLE_THP, &me->mm->flags);
		else
			clear_bit(MMF_DISABLE_THP, &me->mm->flags);
		mmap_write_unlock(me->mm);
		break;
	case PR_MPX_ENABLE_MANAGEMENT:
	case PR_MPX_DISABLE_MANAGEMENT:
		/* No longer implemented: */
		return -EINVAL;
	case PR_SET_FP_MODE:
		error = SET_FP_MODE(me, arg2);
		break;
	case PR_GET_FP_MODE:
		error = GET_FP_MODE(me);
		break;
	case PR_SVE_SET_VL:
		error = SVE_SET_VL(arg2);
		break;
	case PR_SVE_GET_VL:
		error = SVE_GET_VL();
		break;
	case PR_SME_SET_VL:
		error = SME_SET_VL(arg2);
		break;
	case PR_SME_GET_VL:
		error = SME_GET_VL();
		break;
	case PR_GET_SPECULATION_CTRL:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = arch_prctl_spec_ctrl_get(me, arg2);
		break;
	case PR_SET_SPECULATION_CTRL:
		if (arg4 || arg5)
			return -EINVAL;
		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
		break;
	case PR_PAC_RESET_KEYS:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = PAC_RESET_KEYS(me, arg2);
		break;
	case PR_PAC_SET_ENABLED_KEYS:
		if (arg4 || arg5)
			return -EINVAL;
		error = PAC_SET_ENABLED_KEYS(me, arg2, arg3);
		break;
	case PR_PAC_GET_ENABLED_KEYS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = PAC_GET_ENABLED_KEYS(me);
		break;
	case PR_SET_TAGGED_ADDR_CTRL:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = SET_TAGGED_ADDR_CTRL(arg2);
		break;
	case PR_GET_TAGGED_ADDR_CTRL:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = GET_TAGGED_ADDR_CTRL();
		break;
	case PR_SET_IO_FLUSHER:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (arg3 || arg4 || arg5)
			return -EINVAL;

		if (arg2 == 1)
			current->flags |= PR_IO_FLUSHER;
		else if (!arg2)
			current->flags &= ~PR_IO_FLUSHER;
		else
			return -EINVAL;
		break;
	case PR_GET_IO_FLUSHER:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;

		error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER;
		break;
	case PR_SET_SYSCALL_USER_DISPATCH:
		error = set_syscall_user_dispatch(arg2, arg3, arg4,
						  (char __user *) arg5);
		break;
#ifdef CONFIG_SCHED_CORE
	case PR_SCHED_CORE:
		error = sched_core_share_pid(arg2, arg3, arg4, arg5);
		break;
#endif
	case PR_SET_MDWE:
		error = prctl_set_mdwe(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_MDWE:
		error = prctl_get_mdwe(arg2, arg3, arg4, arg5);
		break;
	case PR_PPC_GET_DEXCR:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = PPC_GET_DEXCR_ASPECT(me, arg2);
		break;
	case PR_PPC_SET_DEXCR:
		if (arg4 || arg5)
			return -EINVAL;
		error = PPC_SET_DEXCR_ASPECT(me, arg2, arg3);
		break;
	case PR_SET_VMA:
		error = prctl_set_vma(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_AUXV:
		if (arg4 || arg5)
			return -EINVAL;
		error = prctl_get_auxv((void __user *)arg2, arg3);
		break;
#ifdef CONFIG_KSM
	case PR_SET_MEMORY_MERGE:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		if (mmap_write_lock_killable(me->mm))
			return -EINTR;

		if (arg2)
			error = ksm_enable_merge_any(me->mm);
		else
			error = ksm_disable_merge_any(me->mm);
		mmap_write_unlock(me->mm);
		break;
	case PR_GET_MEMORY_MERGE:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;

		error = !!test_bit(MMF_VM_MERGE_ANY, &me->mm->flags);
		break;
#endif
	case PR_RISCV_V_SET_CONTROL:
		error = RISCV_V_SET_CONTROL(arg2);
		break;
	case PR_RISCV_V_GET_CONTROL:
		error = RISCV_V_GET_CONTROL();
		break;
	case PR_RISCV_SET_ICACHE_FLUSH_CTX:
		error = RISCV_SET_ICACHE_FLUSH_CTX(arg2, arg3);
		break;
	case PR_GET_SHADOW_STACK_STATUS:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = arch_get_shadow_stack_status(me, (unsigned long __user *) arg2);
		break;
	case PR_SET_SHADOW_STACK_STATUS:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = arch_set_shadow_stack_status(me, arg2);
		break;
	case PR_LOCK_SHADOW_STACK_STATUS:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = arch_lock_shadow_stack_status(me, arg2);
		break;
	default:
		trace_task_prctl_unknown(option, arg2, arg3, arg4, arg5);
		error = -EINVAL;
		break;
	}
	return error;
}

SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
		struct getcpu_cache __user *, unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();

	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}
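
/*
 * Illustrative (non-normative) userspace sketch of the getcpu() interface
 * implemented above; the getcpu_cache argument is unused by this
 * implementation, so passing NULL is fine (assumes <unistd.h> and
 * <sys/syscall.h> in userspace):
 *
 *	unsigned int cpu, node;
 *
 *	if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
 *		printf("running on cpu %u, node %u\n", cpu, node);
 */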

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
static int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec64 tp;

	memset(info, 0, sizeof(struct sysinfo));

	ktime_get_boottime_ts64(&tp);
	timens_add_boottime(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(u32)-sizeof(int)];
};

COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
{
	struct sysinfo s;
	struct compat_sysinfo s_32;

	do_sysinfo(&s);

	/*
	 * Check to see if any memory value is too large for 32-bit and
	 * scale down if needed.
	 */
	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	memset(&s_32, 0, sizeof(s_32));
	s_32.uptime = s.uptime;
	s_32.loads[0] = s.loads[0];
	s_32.loads[1] = s.loads[1];
	s_32.loads[2] = s.loads[2];
	s_32.totalram = s.totalram;
	s_32.freeram = s.freeram;
	s_32.sharedram = s.sharedram;
	s_32.bufferram = s.bufferram;
	s_32.totalswap = s.totalswap;
	s_32.freeswap = s.freeswap;
	s_32.procs = s.procs;
	s_32.totalhigh = s.totalhigh;
	s_32.freehigh = s.freehigh;
	s_32.mem_unit = s.mem_unit;
	if (copy_to_user(info, &s_32, sizeof(s_32)))
		return -EFAULT;
	return 0;
}
#endif /* CONFIG_COMPAT */
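
/*
 * Illustrative (non-normative) userspace sketch of consuming sysinfo().
 * Callers should multiply the memory fields by mem_unit rather than assume
 * bytes, since the scaling paths above may report a unit larger than one
 * byte on machines with lots of memory (assumes <sys/sysinfo.h> and
 * SI_LOAD_SHIFT from <linux/sysinfo.h> in userspace):
 *
 *	struct sysinfo si;
 *
 *	if (sysinfo(&si) == 0) {
 *		unsigned long long total =
 *			(unsigned long long)si.totalram * si.mem_unit;
 *		printf("RAM: %llu bytes, load1: %.2f\n",
 *		       total, si.loads[0] / (double)(1 << SI_LOAD_SHIFT));
 *	}
 */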