1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * linux/kernel/sys.c 4 * 5 * Copyright (C) 1991, 1992 Linus Torvalds 6 */ 7 8 #include <linux/export.h> 9 #include <linux/mm.h> 10 #include <linux/mm_inline.h> 11 #include <linux/utsname.h> 12 #include <linux/mman.h> 13 #include <linux/reboot.h> 14 #include <linux/prctl.h> 15 #include <linux/highuid.h> 16 #include <linux/fs.h> 17 #include <linux/kmod.h> 18 #include <linux/ksm.h> 19 #include <linux/perf_event.h> 20 #include <linux/resource.h> 21 #include <linux/kernel.h> 22 #include <linux/workqueue.h> 23 #include <linux/capability.h> 24 #include <linux/device.h> 25 #include <linux/key.h> 26 #include <linux/times.h> 27 #include <linux/posix-timers.h> 28 #include <linux/security.h> 29 #include <linux/random.h> 30 #include <linux/suspend.h> 31 #include <linux/tty.h> 32 #include <linux/signal.h> 33 #include <linux/cn_proc.h> 34 #include <linux/getcpu.h> 35 #include <linux/task_io_accounting_ops.h> 36 #include <linux/seccomp.h> 37 #include <linux/cpu.h> 38 #include <linux/personality.h> 39 #include <linux/ptrace.h> 40 #include <linux/fs_struct.h> 41 #include <linux/file.h> 42 #include <linux/mount.h> 43 #include <linux/gfp.h> 44 #include <linux/syscore_ops.h> 45 #include <linux/version.h> 46 #include <linux/ctype.h> 47 #include <linux/syscall_user_dispatch.h> 48 49 #include <linux/compat.h> 50 #include <linux/syscalls.h> 51 #include <linux/kprobes.h> 52 #include <linux/user_namespace.h> 53 #include <linux/time_namespace.h> 54 #include <linux/binfmts.h> 55 56 #include <linux/sched.h> 57 #include <linux/sched/autogroup.h> 58 #include <linux/sched/loadavg.h> 59 #include <linux/sched/stat.h> 60 #include <linux/sched/mm.h> 61 #include <linux/sched/coredump.h> 62 #include <linux/sched/task.h> 63 #include <linux/sched/cputime.h> 64 #include <linux/rcupdate.h> 65 #include <linux/uidgid.h> 66 #include <linux/cred.h> 67 68 #include <linux/nospec.h> 69 70 #include <linux/kmsg_dump.h> 71 /* Move somewhere else to avoid recompiling? 
*/ 72 #include <generated/utsrelease.h> 73 74 #include <linux/uaccess.h> 75 #include <asm/io.h> 76 #include <asm/unistd.h> 77 78 #include "uid16.h" 79 80 #ifndef SET_UNALIGN_CTL 81 # define SET_UNALIGN_CTL(a, b) (-EINVAL) 82 #endif 83 #ifndef GET_UNALIGN_CTL 84 # define GET_UNALIGN_CTL(a, b) (-EINVAL) 85 #endif 86 #ifndef SET_FPEMU_CTL 87 # define SET_FPEMU_CTL(a, b) (-EINVAL) 88 #endif 89 #ifndef GET_FPEMU_CTL 90 # define GET_FPEMU_CTL(a, b) (-EINVAL) 91 #endif 92 #ifndef SET_FPEXC_CTL 93 # define SET_FPEXC_CTL(a, b) (-EINVAL) 94 #endif 95 #ifndef GET_FPEXC_CTL 96 # define GET_FPEXC_CTL(a, b) (-EINVAL) 97 #endif 98 #ifndef GET_ENDIAN 99 # define GET_ENDIAN(a, b) (-EINVAL) 100 #endif 101 #ifndef SET_ENDIAN 102 # define SET_ENDIAN(a, b) (-EINVAL) 103 #endif 104 #ifndef GET_TSC_CTL 105 # define GET_TSC_CTL(a) (-EINVAL) 106 #endif 107 #ifndef SET_TSC_CTL 108 # define SET_TSC_CTL(a) (-EINVAL) 109 #endif 110 #ifndef GET_FP_MODE 111 # define GET_FP_MODE(a) (-EINVAL) 112 #endif 113 #ifndef SET_FP_MODE 114 # define SET_FP_MODE(a,b) (-EINVAL) 115 #endif 116 #ifndef SVE_SET_VL 117 # define SVE_SET_VL(a) (-EINVAL) 118 #endif 119 #ifndef SVE_GET_VL 120 # define SVE_GET_VL() (-EINVAL) 121 #endif 122 #ifndef SME_SET_VL 123 # define SME_SET_VL(a) (-EINVAL) 124 #endif 125 #ifndef SME_GET_VL 126 # define SME_GET_VL() (-EINVAL) 127 #endif 128 #ifndef PAC_RESET_KEYS 129 # define PAC_RESET_KEYS(a, b) (-EINVAL) 130 #endif 131 #ifndef PAC_SET_ENABLED_KEYS 132 # define PAC_SET_ENABLED_KEYS(a, b, c) (-EINVAL) 133 #endif 134 #ifndef PAC_GET_ENABLED_KEYS 135 # define PAC_GET_ENABLED_KEYS(a) (-EINVAL) 136 #endif 137 #ifndef SET_TAGGED_ADDR_CTRL 138 # define SET_TAGGED_ADDR_CTRL(a) (-EINVAL) 139 #endif 140 #ifndef GET_TAGGED_ADDR_CTRL 141 # define GET_TAGGED_ADDR_CTRL() (-EINVAL) 142 #endif 143 #ifndef RISCV_V_SET_CONTROL 144 # define RISCV_V_SET_CONTROL(a) (-EINVAL) 145 #endif 146 #ifndef RISCV_V_GET_CONTROL 147 # define RISCV_V_GET_CONTROL() (-EINVAL) 148 #endif 149 #ifndef RISCV_SET_ICACHE_FLUSH_CTX 150 # define RISCV_SET_ICACHE_FLUSH_CTX(a, b) (-EINVAL) 151 #endif 152 #ifndef PPC_GET_DEXCR_ASPECT 153 # define PPC_GET_DEXCR_ASPECT(a, b) (-EINVAL) 154 #endif 155 #ifndef PPC_SET_DEXCR_ASPECT 156 # define PPC_SET_DEXCR_ASPECT(a, b, c) (-EINVAL) 157 #endif 158 159 /* 160 * this is where the system-wide overflow UID and GID are defined, for 161 * architectures that now have 32-bit UID/GID but didn't in the past 162 */ 163 164 int overflowuid = DEFAULT_OVERFLOWUID; 165 int overflowgid = DEFAULT_OVERFLOWGID; 166 167 EXPORT_SYMBOL(overflowuid); 168 EXPORT_SYMBOL(overflowgid); 169 170 /* 171 * the same as above, but for filesystems which can only store a 16-bit 172 * UID and GID. as such, this is needed on all architectures 173 */ 174 175 int fs_overflowuid = DEFAULT_FS_OVERFLOWUID; 176 int fs_overflowgid = DEFAULT_FS_OVERFLOWGID; 177 178 EXPORT_SYMBOL(fs_overflowuid); 179 EXPORT_SYMBOL(fs_overflowgid); 180 181 /* 182 * Returns true if current's euid is same as p's uid or euid, 183 * or has CAP_SYS_NICE to p's user_ns. 
184 * 185 * Called with rcu_read_lock, creds are safe 186 */ 187 static bool set_one_prio_perm(struct task_struct *p) 188 { 189 const struct cred *cred = current_cred(), *pcred = __task_cred(p); 190 191 if (uid_eq(pcred->uid, cred->euid) || 192 uid_eq(pcred->euid, cred->euid)) 193 return true; 194 if (ns_capable(pcred->user_ns, CAP_SYS_NICE)) 195 return true; 196 return false; 197 } 198 199 /* 200 * set the priority of a task 201 * - the caller must hold the RCU read lock 202 */ 203 static int set_one_prio(struct task_struct *p, int niceval, int error) 204 { 205 int no_nice; 206 207 if (!set_one_prio_perm(p)) { 208 error = -EPERM; 209 goto out; 210 } 211 if (niceval < task_nice(p) && !can_nice(p, niceval)) { 212 error = -EACCES; 213 goto out; 214 } 215 no_nice = security_task_setnice(p, niceval); 216 if (no_nice) { 217 error = no_nice; 218 goto out; 219 } 220 if (error == -ESRCH) 221 error = 0; 222 set_user_nice(p, niceval); 223 out: 224 return error; 225 } 226 227 SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval) 228 { 229 struct task_struct *g, *p; 230 struct user_struct *user; 231 const struct cred *cred = current_cred(); 232 int error = -EINVAL; 233 struct pid *pgrp; 234 kuid_t uid; 235 236 if (which > PRIO_USER || which < PRIO_PROCESS) 237 goto out; 238 239 /* normalize: avoid signed division (rounding problems) */ 240 error = -ESRCH; 241 if (niceval < MIN_NICE) 242 niceval = MIN_NICE; 243 if (niceval > MAX_NICE) 244 niceval = MAX_NICE; 245 246 rcu_read_lock(); 247 switch (which) { 248 case PRIO_PROCESS: 249 if (who) 250 p = find_task_by_vpid(who); 251 else 252 p = current; 253 if (p) 254 error = set_one_prio(p, niceval, error); 255 break; 256 case PRIO_PGRP: 257 if (who) 258 pgrp = find_vpid(who); 259 else 260 pgrp = task_pgrp(current); 261 read_lock(&tasklist_lock); 262 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { 263 error = set_one_prio(p, niceval, error); 264 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); 265 read_unlock(&tasklist_lock); 266 break; 267 case PRIO_USER: 268 uid = make_kuid(cred->user_ns, who); 269 user = cred->user; 270 if (!who) 271 uid = cred->uid; 272 else if (!uid_eq(uid, cred->uid)) { 273 user = find_user(uid); 274 if (!user) 275 goto out_unlock; /* No processes for this user */ 276 } 277 for_each_process_thread(g, p) { 278 if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) 279 error = set_one_prio(p, niceval, error); 280 } 281 if (!uid_eq(uid, cred->uid)) 282 free_uid(user); /* For find_user() */ 283 break; 284 } 285 out_unlock: 286 rcu_read_unlock(); 287 out: 288 return error; 289 } 290 291 /* 292 * Ugh. To avoid negative return values, "getpriority()" will 293 * not return the normal nice-value, but a negated value that 294 * has been offset by 20 (ie it returns 40..1 instead of -20..19) 295 * to stay compatible. 
296 */ 297 SYSCALL_DEFINE2(getpriority, int, which, int, who) 298 { 299 struct task_struct *g, *p; 300 struct user_struct *user; 301 const struct cred *cred = current_cred(); 302 long niceval, retval = -ESRCH; 303 struct pid *pgrp; 304 kuid_t uid; 305 306 if (which > PRIO_USER || which < PRIO_PROCESS) 307 return -EINVAL; 308 309 rcu_read_lock(); 310 switch (which) { 311 case PRIO_PROCESS: 312 if (who) 313 p = find_task_by_vpid(who); 314 else 315 p = current; 316 if (p) { 317 niceval = nice_to_rlimit(task_nice(p)); 318 if (niceval > retval) 319 retval = niceval; 320 } 321 break; 322 case PRIO_PGRP: 323 if (who) 324 pgrp = find_vpid(who); 325 else 326 pgrp = task_pgrp(current); 327 read_lock(&tasklist_lock); 328 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { 329 niceval = nice_to_rlimit(task_nice(p)); 330 if (niceval > retval) 331 retval = niceval; 332 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); 333 read_unlock(&tasklist_lock); 334 break; 335 case PRIO_USER: 336 uid = make_kuid(cred->user_ns, who); 337 user = cred->user; 338 if (!who) 339 uid = cred->uid; 340 else if (!uid_eq(uid, cred->uid)) { 341 user = find_user(uid); 342 if (!user) 343 goto out_unlock; /* No processes for this user */ 344 } 345 for_each_process_thread(g, p) { 346 if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) { 347 niceval = nice_to_rlimit(task_nice(p)); 348 if (niceval > retval) 349 retval = niceval; 350 } 351 } 352 if (!uid_eq(uid, cred->uid)) 353 free_uid(user); /* for find_user() */ 354 break; 355 } 356 out_unlock: 357 rcu_read_unlock(); 358 359 return retval; 360 } 361 362 /* 363 * Unprivileged users may change the real gid to the effective gid 364 * or vice versa. (BSD-style) 365 * 366 * If you set the real gid at all, or set the effective gid to a value not 367 * equal to the real gid, then the saved gid is set to the new effective gid. 368 * 369 * This makes it possible for a setgid program to completely drop its 370 * privileges, which is often a useful assertion to make when you are doing 371 * a security audit over a program. 372 * 373 * The general idea is that a program which uses just setregid() will be 374 * 100% compatible with BSD. A program which uses just setgid() will be 375 * 100% compatible with POSIX with saved IDs. 376 * 377 * SMP: There are not races, the GIDs are checked only by filesystem 378 * operations (as far as semantic preservation is concerned). 
379 */ 380 #ifdef CONFIG_MULTIUSER 381 long __sys_setregid(gid_t rgid, gid_t egid) 382 { 383 struct user_namespace *ns = current_user_ns(); 384 const struct cred *old; 385 struct cred *new; 386 int retval; 387 kgid_t krgid, kegid; 388 389 krgid = make_kgid(ns, rgid); 390 kegid = make_kgid(ns, egid); 391 392 if ((rgid != (gid_t) -1) && !gid_valid(krgid)) 393 return -EINVAL; 394 if ((egid != (gid_t) -1) && !gid_valid(kegid)) 395 return -EINVAL; 396 397 new = prepare_creds(); 398 if (!new) 399 return -ENOMEM; 400 old = current_cred(); 401 402 retval = -EPERM; 403 if (rgid != (gid_t) -1) { 404 if (gid_eq(old->gid, krgid) || 405 gid_eq(old->egid, krgid) || 406 ns_capable_setid(old->user_ns, CAP_SETGID)) 407 new->gid = krgid; 408 else 409 goto error; 410 } 411 if (egid != (gid_t) -1) { 412 if (gid_eq(old->gid, kegid) || 413 gid_eq(old->egid, kegid) || 414 gid_eq(old->sgid, kegid) || 415 ns_capable_setid(old->user_ns, CAP_SETGID)) 416 new->egid = kegid; 417 else 418 goto error; 419 } 420 421 if (rgid != (gid_t) -1 || 422 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid))) 423 new->sgid = new->egid; 424 new->fsgid = new->egid; 425 426 retval = security_task_fix_setgid(new, old, LSM_SETID_RE); 427 if (retval < 0) 428 goto error; 429 430 return commit_creds(new); 431 432 error: 433 abort_creds(new); 434 return retval; 435 } 436 437 SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid) 438 { 439 return __sys_setregid(rgid, egid); 440 } 441 442 /* 443 * setgid() is implemented like SysV w/ SAVED_IDS 444 * 445 * SMP: Same implicit races as above. 446 */ 447 long __sys_setgid(gid_t gid) 448 { 449 struct user_namespace *ns = current_user_ns(); 450 const struct cred *old; 451 struct cred *new; 452 int retval; 453 kgid_t kgid; 454 455 kgid = make_kgid(ns, gid); 456 if (!gid_valid(kgid)) 457 return -EINVAL; 458 459 new = prepare_creds(); 460 if (!new) 461 return -ENOMEM; 462 old = current_cred(); 463 464 retval = -EPERM; 465 if (ns_capable_setid(old->user_ns, CAP_SETGID)) 466 new->gid = new->egid = new->sgid = new->fsgid = kgid; 467 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid)) 468 new->egid = new->fsgid = kgid; 469 else 470 goto error; 471 472 retval = security_task_fix_setgid(new, old, LSM_SETID_ID); 473 if (retval < 0) 474 goto error; 475 476 return commit_creds(new); 477 478 error: 479 abort_creds(new); 480 return retval; 481 } 482 483 SYSCALL_DEFINE1(setgid, gid_t, gid) 484 { 485 return __sys_setgid(gid); 486 } 487 488 /* 489 * change the user struct in a credentials set to match the new UID 490 */ 491 static int set_user(struct cred *new) 492 { 493 struct user_struct *new_user; 494 495 new_user = alloc_uid(new->uid); 496 if (!new_user) 497 return -EAGAIN; 498 499 free_uid(new->user); 500 new->user = new_user; 501 return 0; 502 } 503 504 static void flag_nproc_exceeded(struct cred *new) 505 { 506 if (new->ucounts == current_ucounts()) 507 return; 508 509 /* 510 * We don't fail in case of NPROC limit excess here because too many 511 * poorly written programs don't check set*uid() return code, assuming 512 * it never fails if called by root. We may still enforce NPROC limit 513 * for programs doing set*uid()+execve() by harmlessly deferring the 514 * failure to the execve() stage. 
515 */ 516 if (is_rlimit_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) && 517 new->user != INIT_USER) 518 current->flags |= PF_NPROC_EXCEEDED; 519 else 520 current->flags &= ~PF_NPROC_EXCEEDED; 521 } 522 523 /* 524 * Unprivileged users may change the real uid to the effective uid 525 * or vice versa. (BSD-style) 526 * 527 * If you set the real uid at all, or set the effective uid to a value not 528 * equal to the real uid, then the saved uid is set to the new effective uid. 529 * 530 * This makes it possible for a setuid program to completely drop its 531 * privileges, which is often a useful assertion to make when you are doing 532 * a security audit over a program. 533 * 534 * The general idea is that a program which uses just setreuid() will be 535 * 100% compatible with BSD. A program which uses just setuid() will be 536 * 100% compatible with POSIX with saved IDs. 537 */ 538 long __sys_setreuid(uid_t ruid, uid_t euid) 539 { 540 struct user_namespace *ns = current_user_ns(); 541 const struct cred *old; 542 struct cred *new; 543 int retval; 544 kuid_t kruid, keuid; 545 546 kruid = make_kuid(ns, ruid); 547 keuid = make_kuid(ns, euid); 548 549 if ((ruid != (uid_t) -1) && !uid_valid(kruid)) 550 return -EINVAL; 551 if ((euid != (uid_t) -1) && !uid_valid(keuid)) 552 return -EINVAL; 553 554 new = prepare_creds(); 555 if (!new) 556 return -ENOMEM; 557 old = current_cred(); 558 559 retval = -EPERM; 560 if (ruid != (uid_t) -1) { 561 new->uid = kruid; 562 if (!uid_eq(old->uid, kruid) && 563 !uid_eq(old->euid, kruid) && 564 !ns_capable_setid(old->user_ns, CAP_SETUID)) 565 goto error; 566 } 567 568 if (euid != (uid_t) -1) { 569 new->euid = keuid; 570 if (!uid_eq(old->uid, keuid) && 571 !uid_eq(old->euid, keuid) && 572 !uid_eq(old->suid, keuid) && 573 !ns_capable_setid(old->user_ns, CAP_SETUID)) 574 goto error; 575 } 576 577 if (!uid_eq(new->uid, old->uid)) { 578 retval = set_user(new); 579 if (retval < 0) 580 goto error; 581 } 582 if (ruid != (uid_t) -1 || 583 (euid != (uid_t) -1 && !uid_eq(keuid, old->uid))) 584 new->suid = new->euid; 585 new->fsuid = new->euid; 586 587 retval = security_task_fix_setuid(new, old, LSM_SETID_RE); 588 if (retval < 0) 589 goto error; 590 591 retval = set_cred_ucounts(new); 592 if (retval < 0) 593 goto error; 594 595 flag_nproc_exceeded(new); 596 return commit_creds(new); 597 598 error: 599 abort_creds(new); 600 return retval; 601 } 602 603 SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid) 604 { 605 return __sys_setreuid(ruid, euid); 606 } 607 608 /* 609 * setuid() is implemented like SysV with SAVED_IDS 610 * 611 * Note that SAVED_ID's is deficient in that a setuid root program 612 * like sendmail, for example, cannot set its uid to be a normal 613 * user and then switch back, because if you're root, setuid() sets 614 * the saved uid too. If you don't like this, blame the bright people 615 * in the POSIX committee and/or USG. Note that the BSD-style setreuid() 616 * will allow a root program to temporarily drop privileges and be able to 617 * regain them by swapping the real and effective uid. 
618 */ 619 long __sys_setuid(uid_t uid) 620 { 621 struct user_namespace *ns = current_user_ns(); 622 const struct cred *old; 623 struct cred *new; 624 int retval; 625 kuid_t kuid; 626 627 kuid = make_kuid(ns, uid); 628 if (!uid_valid(kuid)) 629 return -EINVAL; 630 631 new = prepare_creds(); 632 if (!new) 633 return -ENOMEM; 634 old = current_cred(); 635 636 retval = -EPERM; 637 if (ns_capable_setid(old->user_ns, CAP_SETUID)) { 638 new->suid = new->uid = kuid; 639 if (!uid_eq(kuid, old->uid)) { 640 retval = set_user(new); 641 if (retval < 0) 642 goto error; 643 } 644 } else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) { 645 goto error; 646 } 647 648 new->fsuid = new->euid = kuid; 649 650 retval = security_task_fix_setuid(new, old, LSM_SETID_ID); 651 if (retval < 0) 652 goto error; 653 654 retval = set_cred_ucounts(new); 655 if (retval < 0) 656 goto error; 657 658 flag_nproc_exceeded(new); 659 return commit_creds(new); 660 661 error: 662 abort_creds(new); 663 return retval; 664 } 665 666 SYSCALL_DEFINE1(setuid, uid_t, uid) 667 { 668 return __sys_setuid(uid); 669 } 670 671 672 /* 673 * This function implements a generic ability to update ruid, euid, 674 * and suid. This allows you to implement the 4.4 compatible seteuid(). 675 */ 676 long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) 677 { 678 struct user_namespace *ns = current_user_ns(); 679 const struct cred *old; 680 struct cred *new; 681 int retval; 682 kuid_t kruid, keuid, ksuid; 683 bool ruid_new, euid_new, suid_new; 684 685 kruid = make_kuid(ns, ruid); 686 keuid = make_kuid(ns, euid); 687 ksuid = make_kuid(ns, suid); 688 689 if ((ruid != (uid_t) -1) && !uid_valid(kruid)) 690 return -EINVAL; 691 692 if ((euid != (uid_t) -1) && !uid_valid(keuid)) 693 return -EINVAL; 694 695 if ((suid != (uid_t) -1) && !uid_valid(ksuid)) 696 return -EINVAL; 697 698 old = current_cred(); 699 700 /* check for no-op */ 701 if ((ruid == (uid_t) -1 || uid_eq(kruid, old->uid)) && 702 (euid == (uid_t) -1 || (uid_eq(keuid, old->euid) && 703 uid_eq(keuid, old->fsuid))) && 704 (suid == (uid_t) -1 || uid_eq(ksuid, old->suid))) 705 return 0; 706 707 ruid_new = ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) && 708 !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid); 709 euid_new = euid != (uid_t) -1 && !uid_eq(keuid, old->uid) && 710 !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid); 711 suid_new = suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) && 712 !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid); 713 if ((ruid_new || euid_new || suid_new) && 714 !ns_capable_setid(old->user_ns, CAP_SETUID)) 715 return -EPERM; 716 717 new = prepare_creds(); 718 if (!new) 719 return -ENOMEM; 720 721 if (ruid != (uid_t) -1) { 722 new->uid = kruid; 723 if (!uid_eq(kruid, old->uid)) { 724 retval = set_user(new); 725 if (retval < 0) 726 goto error; 727 } 728 } 729 if (euid != (uid_t) -1) 730 new->euid = keuid; 731 if (suid != (uid_t) -1) 732 new->suid = ksuid; 733 new->fsuid = new->euid; 734 735 retval = security_task_fix_setuid(new, old, LSM_SETID_RES); 736 if (retval < 0) 737 goto error; 738 739 retval = set_cred_ucounts(new); 740 if (retval < 0) 741 goto error; 742 743 flag_nproc_exceeded(new); 744 return commit_creds(new); 745 746 error: 747 abort_creds(new); 748 return retval; 749 } 750 751 SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) 752 { 753 return __sys_setresuid(ruid, euid, suid); 754 } 755 756 SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp) 757 { 758 const struct cred *cred = 
current_cred(); 759 int retval; 760 uid_t ruid, euid, suid; 761 762 ruid = from_kuid_munged(cred->user_ns, cred->uid); 763 euid = from_kuid_munged(cred->user_ns, cred->euid); 764 suid = from_kuid_munged(cred->user_ns, cred->suid); 765 766 retval = put_user(ruid, ruidp); 767 if (!retval) { 768 retval = put_user(euid, euidp); 769 if (!retval) 770 return put_user(suid, suidp); 771 } 772 return retval; 773 } 774 775 /* 776 * Same as above, but for rgid, egid, sgid. 777 */ 778 long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) 779 { 780 struct user_namespace *ns = current_user_ns(); 781 const struct cred *old; 782 struct cred *new; 783 int retval; 784 kgid_t krgid, kegid, ksgid; 785 bool rgid_new, egid_new, sgid_new; 786 787 krgid = make_kgid(ns, rgid); 788 kegid = make_kgid(ns, egid); 789 ksgid = make_kgid(ns, sgid); 790 791 if ((rgid != (gid_t) -1) && !gid_valid(krgid)) 792 return -EINVAL; 793 if ((egid != (gid_t) -1) && !gid_valid(kegid)) 794 return -EINVAL; 795 if ((sgid != (gid_t) -1) && !gid_valid(ksgid)) 796 return -EINVAL; 797 798 old = current_cred(); 799 800 /* check for no-op */ 801 if ((rgid == (gid_t) -1 || gid_eq(krgid, old->gid)) && 802 (egid == (gid_t) -1 || (gid_eq(kegid, old->egid) && 803 gid_eq(kegid, old->fsgid))) && 804 (sgid == (gid_t) -1 || gid_eq(ksgid, old->sgid))) 805 return 0; 806 807 rgid_new = rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) && 808 !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid); 809 egid_new = egid != (gid_t) -1 && !gid_eq(kegid, old->gid) && 810 !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid); 811 sgid_new = sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) && 812 !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid); 813 if ((rgid_new || egid_new || sgid_new) && 814 !ns_capable_setid(old->user_ns, CAP_SETGID)) 815 return -EPERM; 816 817 new = prepare_creds(); 818 if (!new) 819 return -ENOMEM; 820 821 if (rgid != (gid_t) -1) 822 new->gid = krgid; 823 if (egid != (gid_t) -1) 824 new->egid = kegid; 825 if (sgid != (gid_t) -1) 826 new->sgid = ksgid; 827 new->fsgid = new->egid; 828 829 retval = security_task_fix_setgid(new, old, LSM_SETID_RES); 830 if (retval < 0) 831 goto error; 832 833 return commit_creds(new); 834 835 error: 836 abort_creds(new); 837 return retval; 838 } 839 840 SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid) 841 { 842 return __sys_setresgid(rgid, egid, sgid); 843 } 844 845 SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp) 846 { 847 const struct cred *cred = current_cred(); 848 int retval; 849 gid_t rgid, egid, sgid; 850 851 rgid = from_kgid_munged(cred->user_ns, cred->gid); 852 egid = from_kgid_munged(cred->user_ns, cred->egid); 853 sgid = from_kgid_munged(cred->user_ns, cred->sgid); 854 855 retval = put_user(rgid, rgidp); 856 if (!retval) { 857 retval = put_user(egid, egidp); 858 if (!retval) 859 retval = put_user(sgid, sgidp); 860 } 861 862 return retval; 863 } 864 865 866 /* 867 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This 868 * is used for "access()" and for the NFS daemon (letting nfsd stay at 869 * whatever uid it wants to). It normally shadows "euid", except when 870 * explicitly set by setfsuid() or for access.. 
871 */ 872 long __sys_setfsuid(uid_t uid) 873 { 874 const struct cred *old; 875 struct cred *new; 876 uid_t old_fsuid; 877 kuid_t kuid; 878 879 old = current_cred(); 880 old_fsuid = from_kuid_munged(old->user_ns, old->fsuid); 881 882 kuid = make_kuid(old->user_ns, uid); 883 if (!uid_valid(kuid)) 884 return old_fsuid; 885 886 new = prepare_creds(); 887 if (!new) 888 return old_fsuid; 889 890 if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) || 891 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) || 892 ns_capable_setid(old->user_ns, CAP_SETUID)) { 893 if (!uid_eq(kuid, old->fsuid)) { 894 new->fsuid = kuid; 895 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0) 896 goto change_okay; 897 } 898 } 899 900 abort_creds(new); 901 return old_fsuid; 902 903 change_okay: 904 commit_creds(new); 905 return old_fsuid; 906 } 907 908 SYSCALL_DEFINE1(setfsuid, uid_t, uid) 909 { 910 return __sys_setfsuid(uid); 911 } 912 913 /* 914 * Samma på svenska.. 915 */ 916 long __sys_setfsgid(gid_t gid) 917 { 918 const struct cred *old; 919 struct cred *new; 920 gid_t old_fsgid; 921 kgid_t kgid; 922 923 old = current_cred(); 924 old_fsgid = from_kgid_munged(old->user_ns, old->fsgid); 925 926 kgid = make_kgid(old->user_ns, gid); 927 if (!gid_valid(kgid)) 928 return old_fsgid; 929 930 new = prepare_creds(); 931 if (!new) 932 return old_fsgid; 933 934 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) || 935 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) || 936 ns_capable_setid(old->user_ns, CAP_SETGID)) { 937 if (!gid_eq(kgid, old->fsgid)) { 938 new->fsgid = kgid; 939 if (security_task_fix_setgid(new,old,LSM_SETID_FS) == 0) 940 goto change_okay; 941 } 942 } 943 944 abort_creds(new); 945 return old_fsgid; 946 947 change_okay: 948 commit_creds(new); 949 return old_fsgid; 950 } 951 952 SYSCALL_DEFINE1(setfsgid, gid_t, gid) 953 { 954 return __sys_setfsgid(gid); 955 } 956 #endif /* CONFIG_MULTIUSER */ 957 958 /** 959 * sys_getpid - return the thread group id of the current process 960 * 961 * Note, despite the name, this returns the tgid not the pid. The tgid and 962 * the pid are identical unless CLONE_THREAD was specified on clone() in 963 * which case the tgid is the same in all threads of the same group. 964 * 965 * This is SMP safe as current->tgid does not change. 966 */ 967 SYSCALL_DEFINE0(getpid) 968 { 969 return task_tgid_vnr(current); 970 } 971 972 /* Thread ID - the internal kernel "pid" */ 973 SYSCALL_DEFINE0(gettid) 974 { 975 return task_pid_vnr(current); 976 } 977 978 /* 979 * Accessing ->real_parent is not SMP-safe, it could 980 * change from under us. However, we can use a stale 981 * value of ->real_parent under rcu_read_lock(), see 982 * release_task()->call_rcu(delayed_put_task_struct). 
983 */ 984 SYSCALL_DEFINE0(getppid) 985 { 986 int pid; 987 988 rcu_read_lock(); 989 pid = task_tgid_vnr(rcu_dereference(current->real_parent)); 990 rcu_read_unlock(); 991 992 return pid; 993 } 994 995 SYSCALL_DEFINE0(getuid) 996 { 997 /* Only we change this so SMP safe */ 998 return from_kuid_munged(current_user_ns(), current_uid()); 999 } 1000 1001 SYSCALL_DEFINE0(geteuid) 1002 { 1003 /* Only we change this so SMP safe */ 1004 return from_kuid_munged(current_user_ns(), current_euid()); 1005 } 1006 1007 SYSCALL_DEFINE0(getgid) 1008 { 1009 /* Only we change this so SMP safe */ 1010 return from_kgid_munged(current_user_ns(), current_gid()); 1011 } 1012 1013 SYSCALL_DEFINE0(getegid) 1014 { 1015 /* Only we change this so SMP safe */ 1016 return from_kgid_munged(current_user_ns(), current_egid()); 1017 } 1018 1019 static void do_sys_times(struct tms *tms) 1020 { 1021 u64 tgutime, tgstime, cutime, cstime; 1022 1023 thread_group_cputime_adjusted(current, &tgutime, &tgstime); 1024 cutime = current->signal->cutime; 1025 cstime = current->signal->cstime; 1026 tms->tms_utime = nsec_to_clock_t(tgutime); 1027 tms->tms_stime = nsec_to_clock_t(tgstime); 1028 tms->tms_cutime = nsec_to_clock_t(cutime); 1029 tms->tms_cstime = nsec_to_clock_t(cstime); 1030 } 1031 1032 SYSCALL_DEFINE1(times, struct tms __user *, tbuf) 1033 { 1034 if (tbuf) { 1035 struct tms tmp; 1036 1037 do_sys_times(&tmp); 1038 if (copy_to_user(tbuf, &tmp, sizeof(struct tms))) 1039 return -EFAULT; 1040 } 1041 force_successful_syscall_return(); 1042 return (long) jiffies_64_to_clock_t(get_jiffies_64()); 1043 } 1044 1045 #ifdef CONFIG_COMPAT 1046 static compat_clock_t clock_t_to_compat_clock_t(clock_t x) 1047 { 1048 return compat_jiffies_to_clock_t(clock_t_to_jiffies(x)); 1049 } 1050 1051 COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf) 1052 { 1053 if (tbuf) { 1054 struct tms tms; 1055 struct compat_tms tmp; 1056 1057 do_sys_times(&tms); 1058 /* Convert our struct tms to the compat version. */ 1059 tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime); 1060 tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime); 1061 tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime); 1062 tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime); 1063 if (copy_to_user(tbuf, &tmp, sizeof(tmp))) 1064 return -EFAULT; 1065 } 1066 force_successful_syscall_return(); 1067 return compat_jiffies_to_clock_t(jiffies); 1068 } 1069 #endif 1070 1071 /* 1072 * This needs some heavy checking ... 1073 * I just haven't the stomach for it. I also don't fully 1074 * understand sessions/pgrp etc. Let somebody who does explain it. 1075 * 1076 * OK, I think I have the protection semantics right.... this is really 1077 * only important on a multi-user system anyway, to make sure one user 1078 * can't send a signal to a process owned by another. -TYT, 12/12/91 1079 * 1080 * !PF_FORKNOEXEC check to conform completely to POSIX. 1081 */ 1082 SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) 1083 { 1084 struct task_struct *p; 1085 struct task_struct *group_leader = current->group_leader; 1086 struct pid *pgrp; 1087 int err; 1088 1089 if (!pid) 1090 pid = task_pid_vnr(group_leader); 1091 if (!pgid) 1092 pgid = pid; 1093 if (pgid < 0) 1094 return -EINVAL; 1095 rcu_read_lock(); 1096 1097 /* From this point forward we keep holding onto the tasklist lock 1098 * so that our parent does not change from under us. 
-DaveM 1099 */ 1100 write_lock_irq(&tasklist_lock); 1101 1102 err = -ESRCH; 1103 p = find_task_by_vpid(pid); 1104 if (!p) 1105 goto out; 1106 1107 err = -EINVAL; 1108 if (!thread_group_leader(p)) 1109 goto out; 1110 1111 if (same_thread_group(p->real_parent, group_leader)) { 1112 err = -EPERM; 1113 if (task_session(p) != task_session(group_leader)) 1114 goto out; 1115 err = -EACCES; 1116 if (!(p->flags & PF_FORKNOEXEC)) 1117 goto out; 1118 } else { 1119 err = -ESRCH; 1120 if (p != group_leader) 1121 goto out; 1122 } 1123 1124 err = -EPERM; 1125 if (p->signal->leader) 1126 goto out; 1127 1128 pgrp = task_pid(p); 1129 if (pgid != pid) { 1130 struct task_struct *g; 1131 1132 pgrp = find_vpid(pgid); 1133 g = pid_task(pgrp, PIDTYPE_PGID); 1134 if (!g || task_session(g) != task_session(group_leader)) 1135 goto out; 1136 } 1137 1138 err = security_task_setpgid(p, pgid); 1139 if (err) 1140 goto out; 1141 1142 if (task_pgrp(p) != pgrp) 1143 change_pid(p, PIDTYPE_PGID, pgrp); 1144 1145 err = 0; 1146 out: 1147 /* All paths lead to here, thus we are safe. -DaveM */ 1148 write_unlock_irq(&tasklist_lock); 1149 rcu_read_unlock(); 1150 return err; 1151 } 1152 1153 static int do_getpgid(pid_t pid) 1154 { 1155 struct task_struct *p; 1156 struct pid *grp; 1157 int retval; 1158 1159 rcu_read_lock(); 1160 if (!pid) 1161 grp = task_pgrp(current); 1162 else { 1163 retval = -ESRCH; 1164 p = find_task_by_vpid(pid); 1165 if (!p) 1166 goto out; 1167 grp = task_pgrp(p); 1168 if (!grp) 1169 goto out; 1170 1171 retval = security_task_getpgid(p); 1172 if (retval) 1173 goto out; 1174 } 1175 retval = pid_vnr(grp); 1176 out: 1177 rcu_read_unlock(); 1178 return retval; 1179 } 1180 1181 SYSCALL_DEFINE1(getpgid, pid_t, pid) 1182 { 1183 return do_getpgid(pid); 1184 } 1185 1186 #ifdef __ARCH_WANT_SYS_GETPGRP 1187 1188 SYSCALL_DEFINE0(getpgrp) 1189 { 1190 return do_getpgid(0); 1191 } 1192 1193 #endif 1194 1195 SYSCALL_DEFINE1(getsid, pid_t, pid) 1196 { 1197 struct task_struct *p; 1198 struct pid *sid; 1199 int retval; 1200 1201 rcu_read_lock(); 1202 if (!pid) 1203 sid = task_session(current); 1204 else { 1205 retval = -ESRCH; 1206 p = find_task_by_vpid(pid); 1207 if (!p) 1208 goto out; 1209 sid = task_session(p); 1210 if (!sid) 1211 goto out; 1212 1213 retval = security_task_getsid(p); 1214 if (retval) 1215 goto out; 1216 } 1217 retval = pid_vnr(sid); 1218 out: 1219 rcu_read_unlock(); 1220 return retval; 1221 } 1222 1223 static void set_special_pids(struct pid *pid) 1224 { 1225 struct task_struct *curr = current->group_leader; 1226 1227 if (task_session(curr) != pid) 1228 change_pid(curr, PIDTYPE_SID, pid); 1229 1230 if (task_pgrp(curr) != pid) 1231 change_pid(curr, PIDTYPE_PGID, pid); 1232 } 1233 1234 int ksys_setsid(void) 1235 { 1236 struct task_struct *group_leader = current->group_leader; 1237 struct pid *sid = task_pid(group_leader); 1238 pid_t session = pid_vnr(sid); 1239 int err = -EPERM; 1240 1241 write_lock_irq(&tasklist_lock); 1242 /* Fail if I am already a session leader */ 1243 if (group_leader->signal->leader) 1244 goto out; 1245 1246 /* Fail if a process group id already exists that equals the 1247 * proposed session id. 
1248 */ 1249 if (pid_task(sid, PIDTYPE_PGID)) 1250 goto out; 1251 1252 group_leader->signal->leader = 1; 1253 set_special_pids(sid); 1254 1255 proc_clear_tty(group_leader); 1256 1257 err = session; 1258 out: 1259 write_unlock_irq(&tasklist_lock); 1260 if (err > 0) { 1261 proc_sid_connector(group_leader); 1262 sched_autogroup_create_attach(group_leader); 1263 } 1264 return err; 1265 } 1266 1267 SYSCALL_DEFINE0(setsid) 1268 { 1269 return ksys_setsid(); 1270 } 1271 1272 DECLARE_RWSEM(uts_sem); 1273 1274 #ifdef COMPAT_UTS_MACHINE 1275 #define override_architecture(name) \ 1276 (personality(current->personality) == PER_LINUX32 && \ 1277 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \ 1278 sizeof(COMPAT_UTS_MACHINE))) 1279 #else 1280 #define override_architecture(name) 0 1281 #endif 1282 1283 /* 1284 * Work around broken programs that cannot handle "Linux 3.0". 1285 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40 1286 * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be 1287 * 2.6.60. 1288 */ 1289 static int override_release(char __user *release, size_t len) 1290 { 1291 int ret = 0; 1292 1293 if (current->personality & UNAME26) { 1294 const char *rest = UTS_RELEASE; 1295 char buf[65] = { 0 }; 1296 int ndots = 0; 1297 unsigned v; 1298 size_t copy; 1299 1300 while (*rest) { 1301 if (*rest == '.' && ++ndots >= 3) 1302 break; 1303 if (!isdigit(*rest) && *rest != '.') 1304 break; 1305 rest++; 1306 } 1307 v = LINUX_VERSION_PATCHLEVEL + 60; 1308 copy = clamp_t(size_t, len, 1, sizeof(buf)); 1309 copy = scnprintf(buf, copy, "2.6.%u%s", v, rest); 1310 ret = copy_to_user(release, buf, copy + 1); 1311 } 1312 return ret; 1313 } 1314 1315 SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) 1316 { 1317 struct new_utsname tmp; 1318 1319 down_read(&uts_sem); 1320 memcpy(&tmp, utsname(), sizeof(tmp)); 1321 up_read(&uts_sem); 1322 if (copy_to_user(name, &tmp, sizeof(tmp))) 1323 return -EFAULT; 1324 1325 if (override_release(name->release, sizeof(name->release))) 1326 return -EFAULT; 1327 if (override_architecture(name)) 1328 return -EFAULT; 1329 return 0; 1330 } 1331 1332 #ifdef __ARCH_WANT_SYS_OLD_UNAME 1333 /* 1334 * Old cruft 1335 */ 1336 SYSCALL_DEFINE1(uname, struct old_utsname __user *, name) 1337 { 1338 struct old_utsname tmp; 1339 1340 if (!name) 1341 return -EFAULT; 1342 1343 down_read(&uts_sem); 1344 memcpy(&tmp, utsname(), sizeof(tmp)); 1345 up_read(&uts_sem); 1346 if (copy_to_user(name, &tmp, sizeof(tmp))) 1347 return -EFAULT; 1348 1349 if (override_release(name->release, sizeof(name->release))) 1350 return -EFAULT; 1351 if (override_architecture(name)) 1352 return -EFAULT; 1353 return 0; 1354 } 1355 1356 SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name) 1357 { 1358 struct oldold_utsname tmp; 1359 1360 if (!name) 1361 return -EFAULT; 1362 1363 memset(&tmp, 0, sizeof(tmp)); 1364 1365 down_read(&uts_sem); 1366 memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN); 1367 memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN); 1368 memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN); 1369 memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN); 1370 memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN); 1371 up_read(&uts_sem); 1372 if (copy_to_user(name, &tmp, sizeof(tmp))) 1373 return -EFAULT; 1374 1375 if (override_architecture(name)) 1376 return -EFAULT; 1377 if (override_release(name->release, sizeof(name->release))) 1378 return -EFAULT; 1379 return 0; 1380 } 1381 #endif 1382 1383 SYSCALL_DEFINE2(sethostname, char __user *, 
name, int, len) 1384 { 1385 int errno; 1386 char tmp[__NEW_UTS_LEN]; 1387 1388 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN)) 1389 return -EPERM; 1390 1391 if (len < 0 || len > __NEW_UTS_LEN) 1392 return -EINVAL; 1393 errno = -EFAULT; 1394 if (!copy_from_user(tmp, name, len)) { 1395 struct new_utsname *u; 1396 1397 add_device_randomness(tmp, len); 1398 down_write(&uts_sem); 1399 u = utsname(); 1400 memcpy(u->nodename, tmp, len); 1401 memset(u->nodename + len, 0, sizeof(u->nodename) - len); 1402 errno = 0; 1403 uts_proc_notify(UTS_PROC_HOSTNAME); 1404 up_write(&uts_sem); 1405 } 1406 return errno; 1407 } 1408 1409 #ifdef __ARCH_WANT_SYS_GETHOSTNAME 1410 1411 SYSCALL_DEFINE2(gethostname, char __user *, name, int, len) 1412 { 1413 int i; 1414 struct new_utsname *u; 1415 char tmp[__NEW_UTS_LEN + 1]; 1416 1417 if (len < 0) 1418 return -EINVAL; 1419 down_read(&uts_sem); 1420 u = utsname(); 1421 i = 1 + strlen(u->nodename); 1422 if (i > len) 1423 i = len; 1424 memcpy(tmp, u->nodename, i); 1425 up_read(&uts_sem); 1426 if (copy_to_user(name, tmp, i)) 1427 return -EFAULT; 1428 return 0; 1429 } 1430 1431 #endif 1432 1433 /* 1434 * Only setdomainname; getdomainname can be implemented by calling 1435 * uname() 1436 */ 1437 SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len) 1438 { 1439 int errno; 1440 char tmp[__NEW_UTS_LEN]; 1441 1442 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN)) 1443 return -EPERM; 1444 if (len < 0 || len > __NEW_UTS_LEN) 1445 return -EINVAL; 1446 1447 errno = -EFAULT; 1448 if (!copy_from_user(tmp, name, len)) { 1449 struct new_utsname *u; 1450 1451 add_device_randomness(tmp, len); 1452 down_write(&uts_sem); 1453 u = utsname(); 1454 memcpy(u->domainname, tmp, len); 1455 memset(u->domainname + len, 0, sizeof(u->domainname) - len); 1456 errno = 0; 1457 uts_proc_notify(UTS_PROC_DOMAINNAME); 1458 up_write(&uts_sem); 1459 } 1460 return errno; 1461 } 1462 1463 /* make sure you are allowed to change @tsk limits before calling this */ 1464 static int do_prlimit(struct task_struct *tsk, unsigned int resource, 1465 struct rlimit *new_rlim, struct rlimit *old_rlim) 1466 { 1467 struct rlimit *rlim; 1468 int retval = 0; 1469 1470 if (resource >= RLIM_NLIMITS) 1471 return -EINVAL; 1472 resource = array_index_nospec(resource, RLIM_NLIMITS); 1473 1474 if (new_rlim) { 1475 if (new_rlim->rlim_cur > new_rlim->rlim_max) 1476 return -EINVAL; 1477 if (resource == RLIMIT_NOFILE && 1478 new_rlim->rlim_max > sysctl_nr_open) 1479 return -EPERM; 1480 } 1481 1482 /* Holding a refcount on tsk protects tsk->signal from disappearing. */ 1483 rlim = tsk->signal->rlim + resource; 1484 task_lock(tsk->group_leader); 1485 if (new_rlim) { 1486 /* 1487 * Keep the capable check against init_user_ns until cgroups can 1488 * contain all limits. 1489 */ 1490 if (new_rlim->rlim_max > rlim->rlim_max && 1491 !capable(CAP_SYS_RESOURCE)) 1492 retval = -EPERM; 1493 if (!retval) 1494 retval = security_task_setrlimit(tsk, resource, new_rlim); 1495 } 1496 if (!retval) { 1497 if (old_rlim) 1498 *old_rlim = *rlim; 1499 if (new_rlim) 1500 *rlim = *new_rlim; 1501 } 1502 task_unlock(tsk->group_leader); 1503 1504 /* 1505 * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not 1506 * infinite. In case of RLIM_INFINITY the posix CPU timer code 1507 * ignores the rlimit. 
1508 */ 1509 if (!retval && new_rlim && resource == RLIMIT_CPU && 1510 new_rlim->rlim_cur != RLIM_INFINITY && 1511 IS_ENABLED(CONFIG_POSIX_TIMERS)) { 1512 /* 1513 * update_rlimit_cpu can fail if the task is exiting, but there 1514 * may be other tasks in the thread group that are not exiting, 1515 * and they need their cpu timers adjusted. 1516 * 1517 * The group_leader is the last task to be released, so if we 1518 * cannot update_rlimit_cpu on it, then the entire process is 1519 * exiting and we do not need to update at all. 1520 */ 1521 update_rlimit_cpu(tsk->group_leader, new_rlim->rlim_cur); 1522 } 1523 1524 return retval; 1525 } 1526 1527 SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim) 1528 { 1529 struct rlimit value; 1530 int ret; 1531 1532 ret = do_prlimit(current, resource, NULL, &value); 1533 if (!ret) 1534 ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0; 1535 1536 return ret; 1537 } 1538 1539 #ifdef CONFIG_COMPAT 1540 1541 COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource, 1542 struct compat_rlimit __user *, rlim) 1543 { 1544 struct rlimit r; 1545 struct compat_rlimit r32; 1546 1547 if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit))) 1548 return -EFAULT; 1549 1550 if (r32.rlim_cur == COMPAT_RLIM_INFINITY) 1551 r.rlim_cur = RLIM_INFINITY; 1552 else 1553 r.rlim_cur = r32.rlim_cur; 1554 if (r32.rlim_max == COMPAT_RLIM_INFINITY) 1555 r.rlim_max = RLIM_INFINITY; 1556 else 1557 r.rlim_max = r32.rlim_max; 1558 return do_prlimit(current, resource, &r, NULL); 1559 } 1560 1561 COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource, 1562 struct compat_rlimit __user *, rlim) 1563 { 1564 struct rlimit r; 1565 int ret; 1566 1567 ret = do_prlimit(current, resource, NULL, &r); 1568 if (!ret) { 1569 struct compat_rlimit r32; 1570 if (r.rlim_cur > COMPAT_RLIM_INFINITY) 1571 r32.rlim_cur = COMPAT_RLIM_INFINITY; 1572 else 1573 r32.rlim_cur = r.rlim_cur; 1574 if (r.rlim_max > COMPAT_RLIM_INFINITY) 1575 r32.rlim_max = COMPAT_RLIM_INFINITY; 1576 else 1577 r32.rlim_max = r.rlim_max; 1578 1579 if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit))) 1580 return -EFAULT; 1581 } 1582 return ret; 1583 } 1584 1585 #endif 1586 1587 #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT 1588 1589 /* 1590 * Back compatibility for getrlimit. Needed for some apps. 1591 */ 1592 SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, 1593 struct rlimit __user *, rlim) 1594 { 1595 struct rlimit x; 1596 if (resource >= RLIM_NLIMITS) 1597 return -EINVAL; 1598 1599 resource = array_index_nospec(resource, RLIM_NLIMITS); 1600 task_lock(current->group_leader); 1601 x = current->signal->rlim[resource]; 1602 task_unlock(current->group_leader); 1603 if (x.rlim_cur > 0x7FFFFFFF) 1604 x.rlim_cur = 0x7FFFFFFF; 1605 if (x.rlim_max > 0x7FFFFFFF) 1606 x.rlim_max = 0x7FFFFFFF; 1607 return copy_to_user(rlim, &x, sizeof(x)) ? 
-EFAULT : 0; 1608 } 1609 1610 #ifdef CONFIG_COMPAT 1611 COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, 1612 struct compat_rlimit __user *, rlim) 1613 { 1614 struct rlimit r; 1615 1616 if (resource >= RLIM_NLIMITS) 1617 return -EINVAL; 1618 1619 resource = array_index_nospec(resource, RLIM_NLIMITS); 1620 task_lock(current->group_leader); 1621 r = current->signal->rlim[resource]; 1622 task_unlock(current->group_leader); 1623 if (r.rlim_cur > 0x7FFFFFFF) 1624 r.rlim_cur = 0x7FFFFFFF; 1625 if (r.rlim_max > 0x7FFFFFFF) 1626 r.rlim_max = 0x7FFFFFFF; 1627 1628 if (put_user(r.rlim_cur, &rlim->rlim_cur) || 1629 put_user(r.rlim_max, &rlim->rlim_max)) 1630 return -EFAULT; 1631 return 0; 1632 } 1633 #endif 1634 1635 #endif 1636 1637 static inline bool rlim64_is_infinity(__u64 rlim64) 1638 { 1639 #if BITS_PER_LONG < 64 1640 return rlim64 >= ULONG_MAX; 1641 #else 1642 return rlim64 == RLIM64_INFINITY; 1643 #endif 1644 } 1645 1646 static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64) 1647 { 1648 if (rlim->rlim_cur == RLIM_INFINITY) 1649 rlim64->rlim_cur = RLIM64_INFINITY; 1650 else 1651 rlim64->rlim_cur = rlim->rlim_cur; 1652 if (rlim->rlim_max == RLIM_INFINITY) 1653 rlim64->rlim_max = RLIM64_INFINITY; 1654 else 1655 rlim64->rlim_max = rlim->rlim_max; 1656 } 1657 1658 static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim) 1659 { 1660 if (rlim64_is_infinity(rlim64->rlim_cur)) 1661 rlim->rlim_cur = RLIM_INFINITY; 1662 else 1663 rlim->rlim_cur = (unsigned long)rlim64->rlim_cur; 1664 if (rlim64_is_infinity(rlim64->rlim_max)) 1665 rlim->rlim_max = RLIM_INFINITY; 1666 else 1667 rlim->rlim_max = (unsigned long)rlim64->rlim_max; 1668 } 1669 1670 /* rcu lock must be held */ 1671 static int check_prlimit_permission(struct task_struct *task, 1672 unsigned int flags) 1673 { 1674 const struct cred *cred = current_cred(), *tcred; 1675 bool id_match; 1676 1677 if (current == task) 1678 return 0; 1679 1680 tcred = __task_cred(task); 1681 id_match = (uid_eq(cred->uid, tcred->euid) && 1682 uid_eq(cred->uid, tcred->suid) && 1683 uid_eq(cred->uid, tcred->uid) && 1684 gid_eq(cred->gid, tcred->egid) && 1685 gid_eq(cred->gid, tcred->sgid) && 1686 gid_eq(cred->gid, tcred->gid)); 1687 if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE)) 1688 return -EPERM; 1689 1690 return security_task_prlimit(cred, tcred, flags); 1691 } 1692 1693 SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource, 1694 const struct rlimit64 __user *, new_rlim, 1695 struct rlimit64 __user *, old_rlim) 1696 { 1697 struct rlimit64 old64, new64; 1698 struct rlimit old, new; 1699 struct task_struct *tsk; 1700 unsigned int checkflags = 0; 1701 int ret; 1702 1703 if (old_rlim) 1704 checkflags |= LSM_PRLIMIT_READ; 1705 1706 if (new_rlim) { 1707 if (copy_from_user(&new64, new_rlim, sizeof(new64))) 1708 return -EFAULT; 1709 rlim64_to_rlim(&new64, &new); 1710 checkflags |= LSM_PRLIMIT_WRITE; 1711 } 1712 1713 rcu_read_lock(); 1714 tsk = pid ? find_task_by_vpid(pid) : current; 1715 if (!tsk) { 1716 rcu_read_unlock(); 1717 return -ESRCH; 1718 } 1719 ret = check_prlimit_permission(tsk, checkflags); 1720 if (ret) { 1721 rcu_read_unlock(); 1722 return ret; 1723 } 1724 get_task_struct(tsk); 1725 rcu_read_unlock(); 1726 1727 ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL, 1728 old_rlim ? 
&old : NULL); 1729 1730 if (!ret && old_rlim) { 1731 rlim_to_rlim64(&old, &old64); 1732 if (copy_to_user(old_rlim, &old64, sizeof(old64))) 1733 ret = -EFAULT; 1734 } 1735 1736 put_task_struct(tsk); 1737 return ret; 1738 } 1739 1740 SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) 1741 { 1742 struct rlimit new_rlim; 1743 1744 if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) 1745 return -EFAULT; 1746 return do_prlimit(current, resource, &new_rlim, NULL); 1747 } 1748 1749 /* 1750 * It would make sense to put struct rusage in the task_struct, 1751 * except that would make the task_struct be *really big*. After 1752 * task_struct gets moved into malloc'ed memory, it would 1753 * make sense to do this. It will make moving the rest of the information 1754 * a lot simpler! (Which we're not doing right now because we're not 1755 * measuring them yet). 1756 * 1757 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have 1758 * races with threads incrementing their own counters. But since word 1759 * reads are atomic, we either get new values or old values and we don't 1760 * care which for the sums. We always take the siglock to protect reading 1761 * the c* fields from p->signal from races with exit.c updating those 1762 * fields when reaping, so a sample either gets all the additions of a 1763 * given child after it's reaped, or none so this sample is before reaping. 1764 * 1765 * Locking: 1766 * We need to take the siglock for CHILDEREN, SELF and BOTH 1767 * for the cases current multithreaded, non-current single threaded 1768 * non-current multithreaded. Thread traversal is now safe with 1769 * the siglock held. 1770 * Strictly speaking, we donot need to take the siglock if we are current and 1771 * single threaded, as no one else can take our signal_struct away, no one 1772 * else can reap the children to update signal->c* counters, and no one else 1773 * can race with the signal-> fields. If we do not take any lock, the 1774 * signal-> fields could be read out of order while another thread was just 1775 * exiting. So we should place a read memory barrier when we avoid the lock. 1776 * On the writer side, write memory barrier is implied in __exit_signal 1777 * as __exit_signal releases the siglock spinlock after updating the signal-> 1778 * fields. But we don't do this yet to keep things simple. 
1779 * 1780 */ 1781 1782 static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r) 1783 { 1784 r->ru_nvcsw += t->nvcsw; 1785 r->ru_nivcsw += t->nivcsw; 1786 r->ru_minflt += t->min_flt; 1787 r->ru_majflt += t->maj_flt; 1788 r->ru_inblock += task_io_get_inblock(t); 1789 r->ru_oublock += task_io_get_oublock(t); 1790 } 1791 1792 void getrusage(struct task_struct *p, int who, struct rusage *r) 1793 { 1794 struct task_struct *t; 1795 unsigned long flags; 1796 u64 tgutime, tgstime, utime, stime; 1797 unsigned long maxrss; 1798 struct mm_struct *mm; 1799 struct signal_struct *sig = p->signal; 1800 unsigned int seq = 0; 1801 1802 retry: 1803 memset(r, 0, sizeof(*r)); 1804 utime = stime = 0; 1805 maxrss = 0; 1806 1807 if (who == RUSAGE_THREAD) { 1808 task_cputime_adjusted(current, &utime, &stime); 1809 accumulate_thread_rusage(p, r); 1810 maxrss = sig->maxrss; 1811 goto out_thread; 1812 } 1813 1814 flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq); 1815 1816 switch (who) { 1817 case RUSAGE_BOTH: 1818 case RUSAGE_CHILDREN: 1819 utime = sig->cutime; 1820 stime = sig->cstime; 1821 r->ru_nvcsw = sig->cnvcsw; 1822 r->ru_nivcsw = sig->cnivcsw; 1823 r->ru_minflt = sig->cmin_flt; 1824 r->ru_majflt = sig->cmaj_flt; 1825 r->ru_inblock = sig->cinblock; 1826 r->ru_oublock = sig->coublock; 1827 maxrss = sig->cmaxrss; 1828 1829 if (who == RUSAGE_CHILDREN) 1830 break; 1831 fallthrough; 1832 1833 case RUSAGE_SELF: 1834 r->ru_nvcsw += sig->nvcsw; 1835 r->ru_nivcsw += sig->nivcsw; 1836 r->ru_minflt += sig->min_flt; 1837 r->ru_majflt += sig->maj_flt; 1838 r->ru_inblock += sig->inblock; 1839 r->ru_oublock += sig->oublock; 1840 if (maxrss < sig->maxrss) 1841 maxrss = sig->maxrss; 1842 1843 rcu_read_lock(); 1844 __for_each_thread(sig, t) 1845 accumulate_thread_rusage(t, r); 1846 rcu_read_unlock(); 1847 1848 break; 1849 1850 default: 1851 BUG(); 1852 } 1853 1854 if (need_seqretry(&sig->stats_lock, seq)) { 1855 seq = 1; 1856 goto retry; 1857 } 1858 done_seqretry_irqrestore(&sig->stats_lock, seq, flags); 1859 1860 if (who == RUSAGE_CHILDREN) 1861 goto out_children; 1862 1863 thread_group_cputime_adjusted(p, &tgutime, &tgstime); 1864 utime += tgutime; 1865 stime += tgstime; 1866 1867 out_thread: 1868 mm = get_task_mm(p); 1869 if (mm) { 1870 setmax_mm_hiwater_rss(&maxrss, mm); 1871 mmput(mm); 1872 } 1873 1874 out_children: 1875 r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */ 1876 r->ru_utime = ns_to_kernel_old_timeval(utime); 1877 r->ru_stime = ns_to_kernel_old_timeval(stime); 1878 } 1879 1880 SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru) 1881 { 1882 struct rusage r; 1883 1884 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN && 1885 who != RUSAGE_THREAD) 1886 return -EINVAL; 1887 1888 getrusage(current, who, &r); 1889 return copy_to_user(ru, &r, sizeof(r)) ? 
-EFAULT : 0; 1890 } 1891 1892 #ifdef CONFIG_COMPAT 1893 COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru) 1894 { 1895 struct rusage r; 1896 1897 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN && 1898 who != RUSAGE_THREAD) 1899 return -EINVAL; 1900 1901 getrusage(current, who, &r); 1902 return put_compat_rusage(&r, ru); 1903 } 1904 #endif 1905 1906 SYSCALL_DEFINE1(umask, int, mask) 1907 { 1908 mask = xchg(¤t->fs->umask, mask & S_IRWXUGO); 1909 return mask; 1910 } 1911 1912 static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) 1913 { 1914 CLASS(fd, exe)(fd); 1915 struct inode *inode; 1916 int err; 1917 1918 if (fd_empty(exe)) 1919 return -EBADF; 1920 1921 inode = file_inode(fd_file(exe)); 1922 1923 /* 1924 * Because the original mm->exe_file points to executable file, make 1925 * sure that this one is executable as well, to avoid breaking an 1926 * overall picture. 1927 */ 1928 if (!S_ISREG(inode->i_mode) || path_noexec(&fd_file(exe)->f_path)) 1929 return -EACCES; 1930 1931 err = file_permission(fd_file(exe), MAY_EXEC); 1932 if (err) 1933 return err; 1934 1935 return replace_mm_exe_file(mm, fd_file(exe)); 1936 } 1937 1938 /* 1939 * Check arithmetic relations of passed addresses. 1940 * 1941 * WARNING: we don't require any capability here so be very careful 1942 * in what is allowed for modification from userspace. 1943 */ 1944 static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map) 1945 { 1946 unsigned long mmap_max_addr = TASK_SIZE; 1947 int error = -EINVAL, i; 1948 1949 static const unsigned char offsets[] = { 1950 offsetof(struct prctl_mm_map, start_code), 1951 offsetof(struct prctl_mm_map, end_code), 1952 offsetof(struct prctl_mm_map, start_data), 1953 offsetof(struct prctl_mm_map, end_data), 1954 offsetof(struct prctl_mm_map, start_brk), 1955 offsetof(struct prctl_mm_map, brk), 1956 offsetof(struct prctl_mm_map, start_stack), 1957 offsetof(struct prctl_mm_map, arg_start), 1958 offsetof(struct prctl_mm_map, arg_end), 1959 offsetof(struct prctl_mm_map, env_start), 1960 offsetof(struct prctl_mm_map, env_end), 1961 }; 1962 1963 /* 1964 * Make sure the members are not somewhere outside 1965 * of allowed address space. 1966 */ 1967 for (i = 0; i < ARRAY_SIZE(offsets); i++) { 1968 u64 val = *(u64 *)((char *)prctl_map + offsets[i]); 1969 1970 if ((unsigned long)val >= mmap_max_addr || 1971 (unsigned long)val < mmap_min_addr) 1972 goto out; 1973 } 1974 1975 /* 1976 * Make sure the pairs are ordered. 1977 */ 1978 #define __prctl_check_order(__m1, __op, __m2) \ 1979 ((unsigned long)prctl_map->__m1 __op \ 1980 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL 1981 error = __prctl_check_order(start_code, <, end_code); 1982 error |= __prctl_check_order(start_data,<=, end_data); 1983 error |= __prctl_check_order(start_brk, <=, brk); 1984 error |= __prctl_check_order(arg_start, <=, arg_end); 1985 error |= __prctl_check_order(env_start, <=, env_end); 1986 if (error) 1987 goto out; 1988 #undef __prctl_check_order 1989 1990 error = -EINVAL; 1991 1992 /* 1993 * Neither we should allow to override limits if they set. 
1994 */ 1995 if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk, 1996 prctl_map->start_brk, prctl_map->end_data, 1997 prctl_map->start_data)) 1998 goto out; 1999 2000 error = 0; 2001 out: 2002 return error; 2003 } 2004 2005 #ifdef CONFIG_CHECKPOINT_RESTORE 2006 static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size) 2007 { 2008 struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, }; 2009 unsigned long user_auxv[AT_VECTOR_SIZE]; 2010 struct mm_struct *mm = current->mm; 2011 int error; 2012 2013 BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv)); 2014 BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256); 2015 2016 if (opt == PR_SET_MM_MAP_SIZE) 2017 return put_user((unsigned int)sizeof(prctl_map), 2018 (unsigned int __user *)addr); 2019 2020 if (data_size != sizeof(prctl_map)) 2021 return -EINVAL; 2022 2023 if (copy_from_user(&prctl_map, addr, sizeof(prctl_map))) 2024 return -EFAULT; 2025 2026 error = validate_prctl_map_addr(&prctl_map); 2027 if (error) 2028 return error; 2029 2030 if (prctl_map.auxv_size) { 2031 /* 2032 * Someone is trying to cheat the auxv vector. 2033 */ 2034 if (!prctl_map.auxv || 2035 prctl_map.auxv_size > sizeof(mm->saved_auxv)) 2036 return -EINVAL; 2037 2038 memset(user_auxv, 0, sizeof(user_auxv)); 2039 if (copy_from_user(user_auxv, 2040 (const void __user *)prctl_map.auxv, 2041 prctl_map.auxv_size)) 2042 return -EFAULT; 2043 2044 /* Last entry must be AT_NULL as specification requires */ 2045 user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL; 2046 user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL; 2047 } 2048 2049 if (prctl_map.exe_fd != (u32)-1) { 2050 /* 2051 * Check if the current user is checkpoint/restore capable. 2052 * At the time of this writing, it checks for CAP_SYS_ADMIN 2053 * or CAP_CHECKPOINT_RESTORE. 2054 * Note that a user with access to ptrace can masquerade an 2055 * arbitrary program as any executable, even setuid ones. 2056 * This may have implications in the tomoyo subsystem. 2057 */ 2058 if (!checkpoint_restore_ns_capable(current_user_ns())) 2059 return -EPERM; 2060 2061 error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd); 2062 if (error) 2063 return error; 2064 } 2065 2066 /* 2067 * arg_lock protects concurrent updates but we still need mmap_lock for 2068 * read to exclude races with sys_brk. 2069 */ 2070 mmap_read_lock(mm); 2071 2072 /* 2073 * We don't validate if these members are pointing to 2074 * real present VMAs because application may have correspond 2075 * VMAs already unmapped and kernel uses these members for statistics 2076 * output in procfs mostly, except 2077 * 2078 * - @start_brk/@brk which are used in do_brk_flags but kernel lookups 2079 * for VMAs when updating these members so anything wrong written 2080 * here cause kernel to swear at userspace program but won't lead 2081 * to any problem in kernel itself 2082 */ 2083 2084 spin_lock(&mm->arg_lock); 2085 mm->start_code = prctl_map.start_code; 2086 mm->end_code = prctl_map.end_code; 2087 mm->start_data = prctl_map.start_data; 2088 mm->end_data = prctl_map.end_data; 2089 mm->start_brk = prctl_map.start_brk; 2090 mm->brk = prctl_map.brk; 2091 mm->start_stack = prctl_map.start_stack; 2092 mm->arg_start = prctl_map.arg_start; 2093 mm->arg_end = prctl_map.arg_end; 2094 mm->env_start = prctl_map.env_start; 2095 mm->env_end = prctl_map.env_end; 2096 spin_unlock(&mm->arg_lock); 2097 2098 /* 2099 * Note this update of @saved_auxv is lockless thus 2100 * if someone reads this member in procfs while we're 2101 * updating -- it may get partly updated results. 
2114 static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr, 2115 unsigned long len) 2116 { 2117 /* 2118 * This doesn't move the auxiliary vector itself since it's pinned to 2119 * mm_struct, but it permits filling the vector with new values. It's 2120 * up to the caller to provide sane values here; otherwise userspace 2121 * tools which use this vector might be unhappy. 2122 */ 2123 unsigned long user_auxv[AT_VECTOR_SIZE] = {}; 2124 2125 if (len > sizeof(user_auxv)) 2126 return -EINVAL; 2127 2128 if (copy_from_user(user_auxv, (const void __user *)addr, len)) 2129 return -EFAULT; 2130 2131 /* Make sure the last entry is always AT_NULL */ 2132 user_auxv[AT_VECTOR_SIZE - 2] = 0; 2133 user_auxv[AT_VECTOR_SIZE - 1] = 0; 2134 2135 BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv)); 2136 2137 task_lock(current); 2138 memcpy(mm->saved_auxv, user_auxv, len); 2139 task_unlock(current); 2140 2141 return 0; 2142 } 2143
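/*
 * Illustrative userspace sketch (not kernel code): replacing the recorded
 * auxiliary vector via PR_SET_MM_AUXV. The values are placeholders and
 * the caller needs CAP_SYS_RESOURCE:
 *
 *	#include <elf.h>
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	unsigned long auxv[] = {
 *		AT_PAGESZ, 4096,
 *		AT_NULL,   0,		// terminator; enforced above anyway
 *	};
 *
 *	prctl(PR_SET_MM, PR_SET_MM_AUXV, (unsigned long)auxv, sizeof(auxv), 0);
 */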
2144 static int prctl_set_mm(int opt, unsigned long addr, 2145 unsigned long arg4, unsigned long arg5) 2146 { 2147 struct mm_struct *mm = current->mm; 2148 struct prctl_mm_map prctl_map = { 2149 .auxv = NULL, 2150 .auxv_size = 0, 2151 .exe_fd = -1, 2152 }; 2153 struct vm_area_struct *vma; 2154 int error; 2155 2156 if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV && 2157 opt != PR_SET_MM_MAP && 2158 opt != PR_SET_MM_MAP_SIZE))) 2159 return -EINVAL; 2160 2161 #ifdef CONFIG_CHECKPOINT_RESTORE 2162 if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE) 2163 return prctl_set_mm_map(opt, (const void __user *)addr, arg4); 2164 #endif 2165 2166 if (!capable(CAP_SYS_RESOURCE)) 2167 return -EPERM; 2168 2169 if (opt == PR_SET_MM_EXE_FILE) 2170 return prctl_set_mm_exe_file(mm, (unsigned int)addr); 2171 2172 if (opt == PR_SET_MM_AUXV) 2173 return prctl_set_auxv(mm, addr, arg4); 2174 2175 if (addr >= TASK_SIZE || addr < mmap_min_addr) 2176 return -EINVAL; 2177 2178 error = -EINVAL; 2179
2180 /* 2181 * arg_lock protects concurrent updates of the arg boundaries; we need 2182 * mmap_lock for a) excluding concurrent sys_brk and b) finding the VMA 2183 * for addr validation. 2184 */ 2185 mmap_read_lock(mm); 2186 vma = find_vma(mm, addr); 2187 2188 spin_lock(&mm->arg_lock); 2189 prctl_map.start_code = mm->start_code; 2190 prctl_map.end_code = mm->end_code; 2191 prctl_map.start_data = mm->start_data; 2192 prctl_map.end_data = mm->end_data; 2193 prctl_map.start_brk = mm->start_brk; 2194 prctl_map.brk = mm->brk; 2195 prctl_map.start_stack = mm->start_stack; 2196 prctl_map.arg_start = mm->arg_start; 2197 prctl_map.arg_end = mm->arg_end; 2198 prctl_map.env_start = mm->env_start; 2199 prctl_map.env_end = mm->env_end; 2200
2201 switch (opt) { 2202 case PR_SET_MM_START_CODE: 2203 prctl_map.start_code = addr; 2204 break; 2205 case PR_SET_MM_END_CODE: 2206 prctl_map.end_code = addr; 2207 break; 2208 case PR_SET_MM_START_DATA: 2209 prctl_map.start_data = addr; 2210 break; 2211 case PR_SET_MM_END_DATA: 2212 prctl_map.end_data = addr; 2213 break; 2214 case PR_SET_MM_START_STACK: 2215 prctl_map.start_stack = addr; 2216 break; 2217 case PR_SET_MM_START_BRK: 2218 prctl_map.start_brk = addr; 2219 break; 2220 case PR_SET_MM_BRK: 2221 prctl_map.brk = addr; 2222 break; 2223 case PR_SET_MM_ARG_START: 2224 prctl_map.arg_start = addr; 2225 break; 2226 case PR_SET_MM_ARG_END: 2227 prctl_map.arg_end = addr; 2228 break; 2229 case PR_SET_MM_ENV_START: 2230 prctl_map.env_start = addr; 2231 break; 2232 case PR_SET_MM_ENV_END: 2233 prctl_map.env_end = addr; 2234 break; 2235 default: 2236 goto out; 2237 } 2238
2239 error = validate_prctl_map_addr(&prctl_map); 2240 if (error) 2241 goto out; 2242
2243 switch (opt) { 2244 /* 2245 * If the command line arguments and environment are placed 2246 * somewhere else on the stack, they can be set up here: 2247 * ARG_START/END for the command line arguments 2248 * and ENV_START/END 2249 * for the environment. 2250 */ 2251 case PR_SET_MM_START_STACK: 2252 case PR_SET_MM_ARG_START: 2253 case PR_SET_MM_ARG_END: 2254 case PR_SET_MM_ENV_START: 2255 case PR_SET_MM_ENV_END: 2256 if (!vma) { 2257 error = -EFAULT; 2258 goto out; 2259 } 2260 } 2261
2262 mm->start_code = prctl_map.start_code; 2263 mm->end_code = prctl_map.end_code; 2264 mm->start_data = prctl_map.start_data; 2265 mm->end_data = prctl_map.end_data; 2266 mm->start_brk = prctl_map.start_brk; 2267 mm->brk = prctl_map.brk; 2268 mm->start_stack = prctl_map.start_stack; 2269 mm->arg_start = prctl_map.arg_start; 2270 mm->arg_end = prctl_map.arg_end; 2271 mm->env_start = prctl_map.env_start; 2272 mm->env_end = prctl_map.env_end; 2273 2274 error = 0; 2275 out: 2276 spin_unlock(&mm->arg_lock); 2277 mmap_read_unlock(mm); 2278 return error; 2279 } 2280
2281 #ifdef CONFIG_CHECKPOINT_RESTORE 2282 static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr) 2283 { 2284 return put_user(me->clear_child_tid, tid_addr); 2285 } 2286 #else 2287 static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr) 2288 { 2289 return -EINVAL; 2290 } 2291 #endif 2292
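/*
 * Illustrative userspace sketch (not kernel code): a service manager
 * marking itself as a child subreaper, so that orphaned descendants are
 * reparented to it instead of to init:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	int on = 0;
 *
 *	prctl(PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0);
 *	prctl(PR_GET_CHILD_SUBREAPER, (unsigned long)&on, 0, 0, 0);
 *	// 'on' is now 1; descendants that outlive their parents get
 *	// reparented to this process, which should reap them with waitpid().
 */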
2293 static int propagate_has_child_subreaper(struct task_struct *p, void *data) 2294 { 2295 /* 2296 * If the task has has_child_subreaper set, all its descendants 2297 * already have the flag too and new descendants will inherit it 2298 * on fork, so skip them. 2299 * 2300 * If we've found a child_reaper, skip the descendants in its 2301 * subtree as they will never leave their pid namespace. 2302 */ 2303 if (p->signal->has_child_subreaper || 2304 is_child_reaper(task_pid(p))) 2305 return 0; 2306 2307 p->signal->has_child_subreaper = 1; 2308 return 1; 2309 } 2310
2311 int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which) 2312 { 2313 return -EINVAL; 2314 } 2315 2316 int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which, 2317 unsigned long ctrl) 2318 { 2319 return -EINVAL; 2320 } 2321 2322 int __weak arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status) 2323 { 2324 return -EINVAL; 2325 } 2326 2327 int __weak arch_set_shadow_stack_status(struct task_struct *t, unsigned long status) 2328 { 2329 return -EINVAL; 2330 } 2331 2332 int __weak arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status) 2333 { 2334 return -EINVAL; 2335 } 2336
2337 #define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE) 2338 2339 #ifdef CONFIG_ANON_VMA_NAME 2340 2341 #define ANON_VMA_NAME_MAX_LEN 80 2342 #define ANON_VMA_NAME_INVALID_CHARS "\\`$[]" 2343 2344 static inline bool is_valid_name_char(char ch) 2345 { 2346 /* printable ASCII characters, excluding ANON_VMA_NAME_INVALID_CHARS */ 2347 return ch > 0x1f && ch < 0x7f && 2348 !strchr(ANON_VMA_NAME_INVALID_CHARS, ch); 2349 } 2350
2351 static int prctl_set_vma(unsigned long opt, unsigned long addr, 2352 unsigned long size, unsigned long arg) 2353 { 2354 struct mm_struct *mm = current->mm; 2355 const char __user *uname; 2356 struct anon_vma_name *anon_name = NULL; 2357 int error; 2358 2359 switch (opt) { 2360 case PR_SET_VMA_ANON_NAME: 2361 uname = (const char __user *)arg; 2362 if (uname) { 2363 char *name, *pch; 2364 2365 name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN); 2366 if (IS_ERR(name)) 2367 return PTR_ERR(name); 2368 2369 for (pch = name; *pch != '\0'; pch++) { 2370 if (!is_valid_name_char(*pch)) { 2371 kfree(name); 2372 return -EINVAL; 2373 } 2374 } 2375 /* anon_vma has its own copy */ 2376 anon_name = anon_vma_name_alloc(name); 2377 kfree(name); 2378 if (!anon_name) 2379 return -ENOMEM; 2380 2381 } 2382 2383 mmap_write_lock(mm); 2384 error = madvise_set_anon_name(mm, addr, size, anon_name); 2385 mmap_write_unlock(mm); 2386 anon_vma_name_put(anon_name); 2387 break; 2388 default: 2389 error = -EINVAL; 2390 } 2391 2392 return error; 2393 } 2394
2395 #else /* CONFIG_ANON_VMA_NAME */ 2396 static int prctl_set_vma(unsigned long opt, unsigned long start, 2397 unsigned long size, unsigned long arg) 2398 { 2399 return -EINVAL; 2400 } 2401 #endif /* CONFIG_ANON_VMA_NAME */ 2402
2403 static inline unsigned long get_current_mdwe(void) 2404 { 2405 unsigned long ret = 0; 2406 2407 if (test_bit(MMF_HAS_MDWE, &current->mm->flags)) 2408 ret |= PR_MDWE_REFUSE_EXEC_GAIN; 2409 if (test_bit(MMF_HAS_MDWE_NO_INHERIT, &current->mm->flags)) 2410 ret |= PR_MDWE_NO_INHERIT; 2411 2412 return ret; 2413 } 2414
2415 static inline int prctl_set_mdwe(unsigned long bits, unsigned long arg3, 2416 unsigned long arg4, unsigned long arg5) 2417 { 2418 unsigned long current_bits; 2419 2420 if (arg3 || arg4 || arg5) 2421 return -EINVAL; 2422 2423 if (bits & ~(PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT)) 2424 return -EINVAL; 2425 2426 /* NO_INHERIT only makes sense with REFUSE_EXEC_GAIN */ 2427 if (bits & PR_MDWE_NO_INHERIT && !(bits & PR_MDWE_REFUSE_EXEC_GAIN)) 2428 return -EINVAL; 2429 2430 /* 2431 * EOPNOTSUPP might be more appropriate here in principle, but 2432 * existing userspace depends on EINVAL specifically. 2433 */ 2434 if (!arch_memory_deny_write_exec_supported()) 2435 return -EINVAL; 2436 2437 current_bits = get_current_mdwe(); 2438 if (current_bits && current_bits != bits) 2439 return -EPERM; /* Cannot unset the flags */ 2440 2441 if (bits & PR_MDWE_NO_INHERIT) 2442 set_bit(MMF_HAS_MDWE_NO_INHERIT, &current->mm->flags); 2443 if (bits & PR_MDWE_REFUSE_EXEC_GAIN) 2444 set_bit(MMF_HAS_MDWE, &current->mm->flags); 2445 2446 return 0; 2447 } 2448
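/*
 * Illustrative userspace sketch (not kernel code): a hardened process
 * opting in to memory-deny-write-execute before handling untrusted input.
 * Once set, the bits cannot be cleared again:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0, 0, 0);
 *	// From now on, mmap()/mprotect() requests that would make a mapping
 *	// both writable and executable are refused.
 *	// prctl(PR_GET_MDWE, 0, 0, 0, 0) returns the currently set bits.
 */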
2449 static inline int prctl_get_mdwe(unsigned long arg2, unsigned long arg3, 2450 unsigned long arg4, unsigned long arg5) 2451 { 2452 if (arg2 || arg3 || arg4 || arg5) 2453 return -EINVAL; 2454 return get_current_mdwe(); 2455 } 2456
2457 static int prctl_get_auxv(void __user *addr, unsigned long len) 2458 { 2459 struct mm_struct *mm = current->mm; 2460 unsigned long size = min_t(unsigned long, sizeof(mm->saved_auxv), len); 2461 2462 if (size && copy_to_user(addr, mm->saved_auxv, size)) 2463 return -EFAULT; 2464 return sizeof(mm->saved_auxv); 2465 } 2466
2467 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, 2468 unsigned long, arg4, unsigned long, arg5) 2469 { 2470 struct task_struct *me = current; 2471 unsigned char comm[sizeof(me->comm)]; 2472 long error; 2473 2474 error = security_task_prctl(option, arg2, arg3, arg4, arg5); 2475 if (error != -ENOSYS) 2476 return error; 2477 2478 error = 0; 2479 switch (option) { 2480 case PR_SET_PDEATHSIG: 2481 if (!valid_signal(arg2)) { 2482 error = -EINVAL; 2483 break; 2484 } 2485 me->pdeath_signal = arg2; 2486 break; 2487 case PR_GET_PDEATHSIG: 2488 error = put_user(me->pdeath_signal, (int __user *)arg2); 2489 break; 2490 case PR_GET_DUMPABLE: 2491 error = get_dumpable(me->mm); 2492 break; 2493 case PR_SET_DUMPABLE: 2494 if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) { 2495 error = -EINVAL; 2496 break; 2497 } 2498 set_dumpable(me->mm, arg2); 2499 break; 2500
2501 case PR_SET_UNALIGN: 2502 error = SET_UNALIGN_CTL(me, arg2); 2503 break; 2504 case PR_GET_UNALIGN: 2505 error = GET_UNALIGN_CTL(me, arg2); 2506 break; 2507 case PR_SET_FPEMU: 2508 error = SET_FPEMU_CTL(me, arg2); 2509 break; 2510 case PR_GET_FPEMU: 2511 error = GET_FPEMU_CTL(me, arg2); 2512 break; 2513 case PR_SET_FPEXC: 2514 error = SET_FPEXC_CTL(me, arg2); 2515 break; 2516 case PR_GET_FPEXC: 2517 error = GET_FPEXC_CTL(me, arg2); 2518 break; 2519 case PR_GET_TIMING: 2520 error = PR_TIMING_STATISTICAL; 2521 break; 2522 case PR_SET_TIMING: 2523 if (arg2 != PR_TIMING_STATISTICAL) 2524 error = -EINVAL; 2525 break; 2526 case PR_SET_NAME: 2527 comm[sizeof(me->comm) - 1] = 0; 2528 if (strncpy_from_user(comm, (char __user *)arg2, 2529 sizeof(me->comm) - 1) < 0) 2530 return -EFAULT; 2531 set_task_comm(me, comm); 2532 proc_comm_connector(me); 2533 break; 2534 case PR_GET_NAME: 2535 get_task_comm(comm, me); 2536 if (copy_to_user((char __user *)arg2, comm, sizeof(comm))) 2537 return -EFAULT; 2538 break; 2539 case PR_GET_ENDIAN: 2540 error = GET_ENDIAN(me, arg2); 2541 break; 2542 case PR_SET_ENDIAN: 2543 error = SET_ENDIAN(me, arg2); 2544 break; 2545 case PR_GET_SECCOMP: 2546 error = prctl_get_seccomp(); 2547 break; 2548 case PR_SET_SECCOMP: 2549 error = prctl_set_seccomp(arg2, (char __user *)arg3); 2550 break; 2551 case PR_GET_TSC: 2552 error = GET_TSC_CTL(arg2); 2553 break; 2554 case PR_SET_TSC: 2555 error = SET_TSC_CTL(arg2); 2556 break; 2557 case PR_TASK_PERF_EVENTS_DISABLE: 2558 error = perf_event_task_disable(); 2559 break; 2560 case PR_TASK_PERF_EVENTS_ENABLE: 2561 error = perf_event_task_enable(); 2562 break; 2563 case
PR_GET_TIMERSLACK: 2564 if (current->timer_slack_ns > ULONG_MAX) 2565 error = ULONG_MAX; 2566 else 2567 error = current->timer_slack_ns; 2568 break; 2569 case PR_SET_TIMERSLACK: 2570 if (rt_or_dl_task_policy(current)) 2571 break; 2572 if (arg2 <= 0) 2573 current->timer_slack_ns = 2574 current->default_timer_slack_ns; 2575 else 2576 current->timer_slack_ns = arg2; 2577 break; 2578 case PR_MCE_KILL: 2579 if (arg4 | arg5) 2580 return -EINVAL; 2581 switch (arg2) { 2582 case PR_MCE_KILL_CLEAR: 2583 if (arg3 != 0) 2584 return -EINVAL; 2585 current->flags &= ~PF_MCE_PROCESS; 2586 break; 2587 case PR_MCE_KILL_SET: 2588 current->flags |= PF_MCE_PROCESS; 2589 if (arg3 == PR_MCE_KILL_EARLY) 2590 current->flags |= PF_MCE_EARLY; 2591 else if (arg3 == PR_MCE_KILL_LATE) 2592 current->flags &= ~PF_MCE_EARLY; 2593 else if (arg3 == PR_MCE_KILL_DEFAULT) 2594 current->flags &= 2595 ~(PF_MCE_EARLY|PF_MCE_PROCESS); 2596 else 2597 return -EINVAL; 2598 break; 2599 default: 2600 return -EINVAL; 2601 } 2602 break; 2603 case PR_MCE_KILL_GET: 2604 if (arg2 | arg3 | arg4 | arg5) 2605 return -EINVAL; 2606 if (current->flags & PF_MCE_PROCESS) 2607 error = (current->flags & PF_MCE_EARLY) ? 2608 PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE; 2609 else 2610 error = PR_MCE_KILL_DEFAULT; 2611 break; 2612 case PR_SET_MM: 2613 error = prctl_set_mm(arg2, arg3, arg4, arg5); 2614 break; 2615 case PR_GET_TID_ADDRESS: 2616 error = prctl_get_tid_address(me, (int __user * __user *)arg2); 2617 break; 2618 case PR_SET_CHILD_SUBREAPER: 2619 me->signal->is_child_subreaper = !!arg2; 2620 if (!arg2) 2621 break; 2622 2623 walk_process_tree(me, propagate_has_child_subreaper, NULL); 2624 break; 2625 case PR_GET_CHILD_SUBREAPER: 2626 error = put_user(me->signal->is_child_subreaper, 2627 (int __user *)arg2); 2628 break; 2629 case PR_SET_NO_NEW_PRIVS: 2630 if (arg2 != 1 || arg3 || arg4 || arg5) 2631 return -EINVAL; 2632 2633 task_set_no_new_privs(current); 2634 break; 2635 case PR_GET_NO_NEW_PRIVS: 2636 if (arg2 || arg3 || arg4 || arg5) 2637 return -EINVAL; 2638 return task_no_new_privs(current) ? 
1 : 0; 2639 case PR_GET_THP_DISABLE: 2640 if (arg2 || arg3 || arg4 || arg5) 2641 return -EINVAL; 2642 error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags); 2643 break; 2644 case PR_SET_THP_DISABLE: 2645 if (arg3 || arg4 || arg5) 2646 return -EINVAL; 2647 if (mmap_write_lock_killable(me->mm)) 2648 return -EINTR; 2649 if (arg2) 2650 set_bit(MMF_DISABLE_THP, &me->mm->flags); 2651 else 2652 clear_bit(MMF_DISABLE_THP, &me->mm->flags); 2653 mmap_write_unlock(me->mm); 2654 break; 2655 case PR_MPX_ENABLE_MANAGEMENT: 2656 case PR_MPX_DISABLE_MANAGEMENT: 2657 /* No longer implemented: */ 2658 return -EINVAL; 2659 case PR_SET_FP_MODE: 2660 error = SET_FP_MODE(me, arg2); 2661 break; 2662 case PR_GET_FP_MODE: 2663 error = GET_FP_MODE(me); 2664 break; 2665 case PR_SVE_SET_VL: 2666 error = SVE_SET_VL(arg2); 2667 break; 2668 case PR_SVE_GET_VL: 2669 error = SVE_GET_VL(); 2670 break; 2671 case PR_SME_SET_VL: 2672 error = SME_SET_VL(arg2); 2673 break; 2674 case PR_SME_GET_VL: 2675 error = SME_GET_VL(); 2676 break; 2677 case PR_GET_SPECULATION_CTRL: 2678 if (arg3 || arg4 || arg5) 2679 return -EINVAL; 2680 error = arch_prctl_spec_ctrl_get(me, arg2); 2681 break; 2682 case PR_SET_SPECULATION_CTRL: 2683 if (arg4 || arg5) 2684 return -EINVAL; 2685 error = arch_prctl_spec_ctrl_set(me, arg2, arg3); 2686 break; 2687 case PR_PAC_RESET_KEYS: 2688 if (arg3 || arg4 || arg5) 2689 return -EINVAL; 2690 error = PAC_RESET_KEYS(me, arg2); 2691 break; 2692 case PR_PAC_SET_ENABLED_KEYS: 2693 if (arg4 || arg5) 2694 return -EINVAL; 2695 error = PAC_SET_ENABLED_KEYS(me, arg2, arg3); 2696 break; 2697 case PR_PAC_GET_ENABLED_KEYS: 2698 if (arg2 || arg3 || arg4 || arg5) 2699 return -EINVAL; 2700 error = PAC_GET_ENABLED_KEYS(me); 2701 break; 2702 case PR_SET_TAGGED_ADDR_CTRL: 2703 if (arg3 || arg4 || arg5) 2704 return -EINVAL; 2705 error = SET_TAGGED_ADDR_CTRL(arg2); 2706 break; 2707 case PR_GET_TAGGED_ADDR_CTRL: 2708 if (arg2 || arg3 || arg4 || arg5) 2709 return -EINVAL; 2710 error = GET_TAGGED_ADDR_CTRL(); 2711 break; 2712 case PR_SET_IO_FLUSHER: 2713 if (!capable(CAP_SYS_RESOURCE)) 2714 return -EPERM; 2715 2716 if (arg3 || arg4 || arg5) 2717 return -EINVAL; 2718 2719 if (arg2 == 1) 2720 current->flags |= PR_IO_FLUSHER; 2721 else if (!arg2) 2722 current->flags &= ~PR_IO_FLUSHER; 2723 else 2724 return -EINVAL; 2725 break; 2726 case PR_GET_IO_FLUSHER: 2727 if (!capable(CAP_SYS_RESOURCE)) 2728 return -EPERM; 2729 2730 if (arg2 || arg3 || arg4 || arg5) 2731 return -EINVAL; 2732 2733 error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER; 2734 break; 2735 case PR_SET_SYSCALL_USER_DISPATCH: 2736 error = set_syscall_user_dispatch(arg2, arg3, arg4, 2737 (char __user *) arg5); 2738 break; 2739 #ifdef CONFIG_SCHED_CORE 2740 case PR_SCHED_CORE: 2741 error = sched_core_share_pid(arg2, arg3, arg4, arg5); 2742 break; 2743 #endif 2744 case PR_SET_MDWE: 2745 error = prctl_set_mdwe(arg2, arg3, arg4, arg5); 2746 break; 2747 case PR_GET_MDWE: 2748 error = prctl_get_mdwe(arg2, arg3, arg4, arg5); 2749 break; 2750 case PR_PPC_GET_DEXCR: 2751 if (arg3 || arg4 || arg5) 2752 return -EINVAL; 2753 error = PPC_GET_DEXCR_ASPECT(me, arg2); 2754 break; 2755 case PR_PPC_SET_DEXCR: 2756 if (arg4 || arg5) 2757 return -EINVAL; 2758 error = PPC_SET_DEXCR_ASPECT(me, arg2, arg3); 2759 break; 2760 case PR_SET_VMA: 2761 error = prctl_set_vma(arg2, arg3, arg4, arg5); 2762 break; 2763 case PR_GET_AUXV: 2764 if (arg4 || arg5) 2765 return -EINVAL; 2766 error = prctl_get_auxv((void __user *)arg2, arg3); 2767 break; 2768 #ifdef CONFIG_KSM 2769 case PR_SET_MEMORY_MERGE: 2770 
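/*
 * Illustrative userspace sketch (not kernel code): opting the whole
 * process into KSM merging, roughly equivalent to madvise(MADV_MERGEABLE)
 * on every eligible anonymous mapping:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0);	// enable
 *	// prctl(PR_GET_MEMORY_MERGE, 0, 0, 0, 0) now returns 1
 */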
if (arg3 || arg4 || arg5) 2771 return -EINVAL; 2772 if (mmap_write_lock_killable(me->mm)) 2773 return -EINTR; 2774 2775 if (arg2) 2776 error = ksm_enable_merge_any(me->mm); 2777 else 2778 error = ksm_disable_merge_any(me->mm); 2779 mmap_write_unlock(me->mm); 2780 break; 2781 case PR_GET_MEMORY_MERGE: 2782 if (arg2 || arg3 || arg4 || arg5) 2783 return -EINVAL; 2784 2785 error = !!test_bit(MMF_VM_MERGE_ANY, &me->mm->flags); 2786 break; 2787 #endif 2788 case PR_RISCV_V_SET_CONTROL: 2789 error = RISCV_V_SET_CONTROL(arg2); 2790 break; 2791 case PR_RISCV_V_GET_CONTROL: 2792 error = RISCV_V_GET_CONTROL(); 2793 break; 2794 case PR_RISCV_SET_ICACHE_FLUSH_CTX: 2795 error = RISCV_SET_ICACHE_FLUSH_CTX(arg2, arg3); 2796 break; 2797 case PR_GET_SHADOW_STACK_STATUS: 2798 if (arg3 || arg4 || arg5) 2799 return -EINVAL; 2800 error = arch_get_shadow_stack_status(me, (unsigned long __user *) arg2); 2801 break; 2802 case PR_SET_SHADOW_STACK_STATUS: 2803 if (arg3 || arg4 || arg5) 2804 return -EINVAL; 2805 error = arch_set_shadow_stack_status(me, arg2); 2806 break; 2807 case PR_LOCK_SHADOW_STACK_STATUS: 2808 if (arg3 || arg4 || arg5) 2809 return -EINVAL; 2810 error = arch_lock_shadow_stack_status(me, arg2); 2811 break; 2812 default: 2813 error = -EINVAL; 2814 break; 2815 } 2816 return error; 2817 } 2818 2819 SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep, 2820 struct getcpu_cache __user *, unused) 2821 { 2822 int err = 0; 2823 int cpu = raw_smp_processor_id(); 2824 2825 if (cpup) 2826 err |= put_user(cpu, cpup); 2827 if (nodep) 2828 err |= put_user(cpu_to_node(cpu), nodep); 2829 return err ? -EFAULT : 0; 2830 } 2831 2832 /** 2833 * do_sysinfo - fill in sysinfo struct 2834 * @info: pointer to buffer to fill 2835 */ 2836 static int do_sysinfo(struct sysinfo *info) 2837 { 2838 unsigned long mem_total, sav_total; 2839 unsigned int mem_unit, bitcount; 2840 struct timespec64 tp; 2841 2842 memset(info, 0, sizeof(struct sysinfo)); 2843 2844 ktime_get_boottime_ts64(&tp); 2845 timens_add_boottime(&tp); 2846 info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0); 2847 2848 get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT); 2849 2850 info->procs = nr_threads; 2851 2852 si_meminfo(info); 2853 si_swapinfo(info); 2854 2855 /* 2856 * If the sum of all the available memory (i.e. ram + swap) 2857 * is less than can be stored in a 32 bit unsigned long then 2858 * we can be binary compatible with 2.2.x kernels. If not, 2859 * well, in that case 2.2.x was broken anyways... 2860 * 2861 * -Erik Andersen <andersee@debian.org> 2862 */ 2863 2864 mem_total = info->totalram + info->totalswap; 2865 if (mem_total < info->totalram || mem_total < info->totalswap) 2866 goto out; 2867 bitcount = 0; 2868 mem_unit = info->mem_unit; 2869 while (mem_unit > 1) { 2870 bitcount++; 2871 mem_unit >>= 1; 2872 sav_total = mem_total; 2873 mem_total <<= 1; 2874 if (mem_total < sav_total) 2875 goto out; 2876 } 2877 2878 /* 2879 * If mem_total did not overflow, multiply all memory values by 2880 * info->mem_unit and set it to 1. This leaves things compatible 2881 * with 2.2.x, and also retains compatibility with earlier 2.4.x 2882 * kernels... 
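 *
 * For example (illustrative numbers, not from a real system): with
 * mem_unit = 4096 the loop above ends with bitcount = 12, so a totalram
 * of 0x20000 pages is reported as 0x20000 << 12 = 0x20000000 bytes and
 * mem_unit is reported as 1.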
2883 */ 2884 2885 info->mem_unit = 1; 2886 info->totalram <<= bitcount; 2887 info->freeram <<= bitcount; 2888 info->sharedram <<= bitcount; 2889 info->bufferram <<= bitcount; 2890 info->totalswap <<= bitcount; 2891 info->freeswap <<= bitcount; 2892 info->totalhigh <<= bitcount; 2893 info->freehigh <<= bitcount; 2894 2895 out: 2896 return 0; 2897 } 2898 2899 SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info) 2900 { 2901 struct sysinfo val; 2902 2903 do_sysinfo(&val); 2904 2905 if (copy_to_user(info, &val, sizeof(struct sysinfo))) 2906 return -EFAULT; 2907 2908 return 0; 2909 } 2910 2911 #ifdef CONFIG_COMPAT 2912 struct compat_sysinfo { 2913 s32 uptime; 2914 u32 loads[3]; 2915 u32 totalram; 2916 u32 freeram; 2917 u32 sharedram; 2918 u32 bufferram; 2919 u32 totalswap; 2920 u32 freeswap; 2921 u16 procs; 2922 u16 pad; 2923 u32 totalhigh; 2924 u32 freehigh; 2925 u32 mem_unit; 2926 char _f[20-2*sizeof(u32)-sizeof(int)]; 2927 }; 2928 2929 COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info) 2930 { 2931 struct sysinfo s; 2932 struct compat_sysinfo s_32; 2933 2934 do_sysinfo(&s); 2935 2936 /* Check to see if any memory value is too large for 32-bit and scale 2937 * down if needed 2938 */ 2939 if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) { 2940 int bitcount = 0; 2941 2942 while (s.mem_unit < PAGE_SIZE) { 2943 s.mem_unit <<= 1; 2944 bitcount++; 2945 } 2946 2947 s.totalram >>= bitcount; 2948 s.freeram >>= bitcount; 2949 s.sharedram >>= bitcount; 2950 s.bufferram >>= bitcount; 2951 s.totalswap >>= bitcount; 2952 s.freeswap >>= bitcount; 2953 s.totalhigh >>= bitcount; 2954 s.freehigh >>= bitcount; 2955 } 2956 2957 memset(&s_32, 0, sizeof(s_32)); 2958 s_32.uptime = s.uptime; 2959 s_32.loads[0] = s.loads[0]; 2960 s_32.loads[1] = s.loads[1]; 2961 s_32.loads[2] = s.loads[2]; 2962 s_32.totalram = s.totalram; 2963 s_32.freeram = s.freeram; 2964 s_32.sharedram = s.sharedram; 2965 s_32.bufferram = s.bufferram; 2966 s_32.totalswap = s.totalswap; 2967 s_32.freeswap = s.freeswap; 2968 s_32.procs = s.procs; 2969 s_32.totalhigh = s.totalhigh; 2970 s_32.freehigh = s.freehigh; 2971 s_32.mem_unit = s.mem_unit; 2972 if (copy_to_user(info, &s_32, sizeof(s_32))) 2973 return -EFAULT; 2974 return 0; 2975 } 2976 #endif /* CONFIG_COMPAT */ 2977
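/*
 * Illustrative userspace sketch (not kernel code): reading the total RAM
 * via sysinfo(2). When the totals fit in an unsigned long, do_sysinfo()
 * above folds mem_unit into the values and reports mem_unit == 1, but
 * portable callers should multiply anyway (the compat path may scale
 * mem_unit back up):
 *
 *	#include <stdio.h>
 *	#include <sys/sysinfo.h>
 *
 *	int main(void)
 *	{
 *		struct sysinfo si;
 *
 *		if (sysinfo(&si) == 0)
 *			printf("total RAM: %llu bytes\n",
 *			       (unsigned long long)si.totalram * si.mem_unit);
 *		return 0;
 *	}
 */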