/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kmod.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>

#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/uidgid.h>
#include <linux/cred.h>

#include <linux/kmsg_dump.h>
/* Move somewhere else to avoid recompiling? */
#include <generated/utsrelease.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif

/*
 * This is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past.
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * The same as above, but for filesystems which can only store a 16-bit
 * UID and GID. As such, this is needed on all architectures.
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
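/*
 * Example: with the default overflow value of 65534, an ID that cannot be
 * represented (e.g. a 32-bit UID written to a filesystem with 16-bit
 * on-disk UIDs) is reported as 65534 instead, which distributions
 * conventionally map to the "nobody" user.
 */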
/*
 * Returns true if current's euid is the same as p's uid or euid,
 * or if current has CAP_SYS_NICE in p's user_ns.
 *
 * Called with rcu_read_lock held; the creds are safe to access.
 */
static bool set_one_prio_perm(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);

	if (uid_eq(pcred->uid,  cred->euid) ||
	    uid_eq(pcred->euid, cred->euid))
		return true;
	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
		return true;
	return false;
}

/*
 * Set the priority of a task.
 * - the caller must hold the RCU read lock
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (!set_one_prio_perm(p)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid) &&
			 !(user = find_user(uid)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid))
				error = set_one_prio(p, niceval, error);
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();
out:
	return error;
}
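/*
 * Example (userspace, illustrative): renice the calling process to a nice
 * value of 10.  Lowering the value again would need CAP_SYS_NICE or
 * RLIMIT_NICE headroom:
 *
 *	if (setpriority(PRIO_PROCESS, 0, 10) == -1)
 *		perror("setpriority");
 */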
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (i.e. it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid) &&
			 !(user = find_user(uid)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid)) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	return retval;
}
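/*
 * Example: a task at nice -5 makes this syscall return 20 - (-5) = 25;
 * the C library undoes the offset so userspace sees -5 again.  Since -1
 * then becomes a legal return value, callers must clear errno before the
 * call to distinguish it from an error.
 */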
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa. (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit of a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races; the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		if (gid_eq(old->gid, krgid) ||
		    gid_eq(old->egid, krgid) ||
		    ns_capable(old->user_ns, CAP_SETGID))
			new->gid = krgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		if (gid_eq(old->gid, kegid) ||
		    gid_eq(old->egid, kegid) ||
		    gid_eq(old->sgid, kegid) ||
		    ns_capable(old->user_ns, CAP_SETGID))
			new->egid = kegid;
		else
			goto error;
	}

	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t kgid;

	kgid = make_kgid(ns, gid);
	if (!gid_valid(kgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable(old->user_ns, CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = kgid;
	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
		new->egid = new->fsgid = kgid;
	else
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

/*
 * Change the user struct in a credentials set to match the new UID.
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

	/*
	 * We don't fail in case of NPROC limit excess here because too many
	 * poorly written programs don't check set*uid() return code, assuming
	 * it never fails if called by root.  We may still enforce NPROC limit
	 * for programs doing set*uid()+execve() by harmlessly deferring the
	 * failure to the execve() stage.
	 */
	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
			new_user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;

	free_uid(new->user);
	new->user = new_user;
	return 0;
}
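/*
 * Example: a setgid program drops its group privilege for good with
 * setregid(getgid(), getgid()) -- setting the real gid forces the saved
 * gid to the new effective gid, so there is no way back.  The BSD-style
 * swap setregid(getegid(), getgid()) instead leaves the old effective
 * gid recoverable from the real gid.
 */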
/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa. (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit of a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;
	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(old->uid, kruid) &&
		    !uid_eq(old->euid, kruid) &&
		    !ns_capable(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = keuid;
		if (!uid_eq(old->uid, keuid) &&
		    !uid_eq(old->euid, keuid) &&
		    !uid_eq(old->suid, keuid) &&
		    !ns_capable(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (!uid_eq(new->uid, old->uid)) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_IDS is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kuid;

	kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable(old->user_ns, CAP_SETUID)) {
		new->suid = new->uid = kuid;
		if (!uid_eq(kuid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
		goto error;
	}

	new->fsuid = new->euid = kuid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4BSD-compatible seteuid().
 */
SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
	ksuid = make_kuid(ns, suid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;

	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	old = current_cred();

	retval = -EPERM;
	if (!ns_capable(old->user_ns, CAP_SETUID)) {
		if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
			goto error;
		if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
			goto error;
		if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = keuid;
	if (suid != (uid_t) -1)
		new->suid = ksuid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
	const struct cred *cred = current_cred();
	int retval;
	uid_t ruid, euid, suid;

	ruid = from_kuid_munged(cred->user_ns, cred->uid);
	euid = from_kuid_munged(cred->user_ns, cred->euid);
	suid = from_kuid_munged(cred->user_ns, cred->suid);

	if (!(retval = put_user(ruid, ruidp)) &&
	    !(retval = put_user(euid, euidp)))
		retval = put_user(suid, suidp);

	return retval;
}
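/*
 * Example (illustrative; unpriv_uid is an arbitrary uid): a root daemon
 * can drop its effective uid while parking root in the saved set, then
 * restore it later, which the checks above permit because the saved uid
 * still matches:
 *
 *	setresuid(-1, unpriv_uid, -1);	// euid = unpriv_uid, suid stays 0
 *	...
 *	setresuid(-1, 0, -1);		// regain root
 *
 * Passing unpriv_uid for all three arguments drops root permanently.
 */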
/*
 * Same as above, but for rgid, egid, sgid.
 */
SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
	ksgid = make_kgid(ns, sgid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (!ns_capable(old->user_ns, CAP_SETGID)) {
		if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
			goto error;
		if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
			goto error;
		if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
			goto error;
	}

	if (rgid != (gid_t) -1)
		new->gid = krgid;
	if (egid != (gid_t) -1)
		new->egid = kegid;
	if (sgid != (gid_t) -1)
		new->sgid = ksgid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
{
	const struct cred *cred = current_cred();
	int retval;
	gid_t rgid, egid, sgid;

	rgid = from_kgid_munged(cred->user_ns, cred->gid);
	egid = from_kgid_munged(cred->user_ns, cred->egid);
	sgid = from_kgid_munged(cred->user_ns, cred->sgid);

	if (!(retval = put_user(rgid, rgidp)) &&
	    !(retval = put_user(egid, egidp)))
		retval = put_user(sgid, sgidp);

	return retval;
}


/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access.
 */
SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
	const struct cred *old;
	struct cred *new;
	uid_t old_fsuid;
	kuid_t kuid;

	old = current_cred();
	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);

	kuid = make_kuid(old->user_ns, uid);
	if (!uid_valid(kuid))
		return old_fsuid;

	new = prepare_creds();
	if (!new)
		return old_fsuid;

	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid) ||
	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
	    ns_capable(old->user_ns, CAP_SETUID)) {
		if (!uid_eq(kuid, old->fsuid)) {
			new->fsuid = kuid;
			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsuid;

change_okay:
	commit_creds(new);
	return old_fsuid;
}
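/*
 * Note: setfsuid() returns the previous fsuid whether or not the change
 * was allowed, so the return value alone cannot signal failure; the
 * usual userspace idiom is a second setfsuid() call to read back the
 * current value.
 */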
/*
 * Same as setfsuid(), but for the fsgid.
 * ("Samma på svenska" -- the same, in Swedish.)
 */
SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;
	kgid_t kgid;

	old = current_cred();
	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);

	kgid = make_kgid(old->user_ns, gid);
	if (!gid_valid(kgid))
		return old_fsgid;

	new = prepare_creds();
	if (!new)
		return old_fsgid;

	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid) ||
	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
	    ns_capable(old->user_ns, CAP_SETGID)) {
		if (!gid_eq(kgid, old->fsgid)) {
			new->fsgid = kgid;
			goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_egid());
}

void do_sys_times(struct tms *tms)
{
	cputime_t tgutime, tgstime, cutime, cstime;

	spin_lock_irq(&current->sighand->siglock);
	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	spin_unlock_irq(&current->sighand->siglock);
	tms->tms_utime = cputime_to_clock_t(tgutime);
	tms->tms_stime = cputime_to_clock_t(tgstime);
	tms->tms_cutime = cputime_to_clock_t(cutime);
	tms->tms_cstime = cputime_to_clock_t(cstime);
}

SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
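/*
 * Example: the times() return value and the struct tms fields are in
 * clock ticks; userspace converts to seconds with sysconf(_SC_CLK_TCK):
 *
 *	struct tms t;
 *	clock_t ticks = times(&t);
 *	double user_sec = (double)t.tms_utime / sysconf(_SC_CLK_TCK);
 */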
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;
	rcu_read_lock();

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp)
		change_pid(p, PIDTYPE_PGID, pgrp);

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();
	return err;
}

SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}

#ifdef __ARCH_WANT_SYS_GETPGRP

SYSCALL_DEFINE0(getpgrp)
{
	return sys_getpgid(0);
}

#endif

SYSCALL_DEFINE1(getsid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}
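/*
 * Example: the classic daemonization sequence forks first, precisely
 * because setsid() below fails with -EPERM for a process group leader:
 *
 *	if (fork() > 0)
 *		exit(0);	// parent exits; child is not a group leader
 *	setsid();		// child becomes session and group leader
 */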
static void set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;

	if (task_session(curr) != pid)
		change_pid(curr, PIDTYPE_SID, pid);

	if (task_pgrp(curr) != pid)
		change_pid(curr, PIDTYPE_PGID, pid);
}

SYSCALL_DEFINE0(setsid)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	set_special_pids(sid);

	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	if (err > 0) {
		proc_sid_connector(group_leader);
		sched_autogroup_create_attach(group_leader);
	}
	return err;
}

DECLARE_RWSEM(uts_sem);

#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif

/*
 * Work around broken programs that cannot handle "Linux 3.0".
 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40.
 */
static int override_release(char __user *release, size_t len)
{
	int ret = 0;

	if (current->personality & UNAME26) {
		const char *rest = UTS_RELEASE;
		char buf[65] = { 0 };
		int ndots = 0;
		unsigned v;
		size_t copy;

		while (*rest) {
			if (*rest == '.' && ++ndots >= 3)
				break;
			if (!isdigit(*rest) && *rest != '.')
				break;
			rest++;
		}
		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
		copy = clamp_t(size_t, len, 1, sizeof(buf));
		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
		ret = copy_to_user(release, buf, copy + 1);
	}
	return ret;
}
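/*
 * Worked example: on a 3.7 kernel, (LINUX_VERSION_CODE >> 8) & 0xff is 7,
 * so v = 47 and a UNAME26 task sees "2.6.47" plus any non-numeric tail of
 * UTS_RELEASE (e.g. "3.7.0-rc3" becomes "2.6.47-rc3"); the 3.x sublevel
 * itself is dropped.
 */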
SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);

	if (!errno && override_release(name->release, sizeof(name->release)))
		errno = -EFAULT;
	if (!errno && override_architecture(name))
		errno = -EFAULT;
	return errno;
}

#ifdef __ARCH_WANT_SYS_OLD_UNAME
/*
 * Old cruft
 */
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
	int error = 0;

	if (!name)
		return -EFAULT;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof(*name)))
		error = -EFAULT;
	up_read(&uts_sem);

	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	if (!error && override_architecture(name))
		error = -EFAULT;
	return error;
}

SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
	int error;

	if (!name)
		return -EFAULT;
	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
		return -EFAULT;

	down_read(&uts_sem);
	error = __copy_to_user(&name->sysname, &utsname()->sysname,
			       __OLD_UTS_LEN);
	error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->nodename, &utsname()->nodename,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->release, &utsname()->release,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->release + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->version, &utsname()->version,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->version + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->machine, &utsname()->machine,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->machine + __OLD_UTS_LEN);
	up_read(&uts_sem);

	if (!error && override_architecture(name))
		error = -EFAULT;
	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	return error ? -EFAULT : 0;
}
#endif

SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->nodename, tmp, len);
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_HOSTNAME);
	}
	up_write(&uts_sem);
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
	int i, errno;
	struct new_utsname *u;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, u->nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname().
 */
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_DOMAINNAME);
	}
	up_write(&uts_sem);
	return errno;
}

SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit value;
	int ret;

	ret = do_prlimit(current, resource, NULL, &value);
	if (!ret)
		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

	return ret;
}
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Backwards compatibility for getrlimit(). Needed for some apps.
 */

SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		struct rlimit __user *, rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#endif
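/*
 * Note: the 0x7FFFFFFF clamp above appears to exist because old 32-bit
 * ABIs treated rlim_t as signed, so larger values (including
 * RLIM_INFINITY) would otherwise have shown up as negative limits.
 */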
static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
	return rlim64 >= ULONG_MAX;
#else
	return rlim64 == RLIM64_INFINITY;
#endif
}

static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
{
	if (rlim->rlim_cur == RLIM_INFINITY)
		rlim64->rlim_cur = RLIM64_INFINITY;
	else
		rlim64->rlim_cur = rlim->rlim_cur;
	if (rlim->rlim_max == RLIM_INFINITY)
		rlim64->rlim_max = RLIM64_INFINITY;
	else
		rlim64->rlim_max = rlim->rlim_max;
}

static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
{
	if (rlim64_is_infinity(rlim64->rlim_cur))
		rlim->rlim_cur = RLIM_INFINITY;
	else
		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
	if (rlim64_is_infinity(rlim64->rlim_max))
		rlim->rlim_max = RLIM_INFINITY;
	else
		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
}

/* make sure you are allowed to change @tsk limits before calling this */
int do_prlimit(struct task_struct *tsk, unsigned int resource,
		struct rlimit *new_rlim, struct rlimit *old_rlim)
{
	struct rlimit *rlim;
	int retval = 0;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (new_rlim) {
		if (new_rlim->rlim_cur > new_rlim->rlim_max)
			return -EINVAL;
		if (resource == RLIMIT_NOFILE &&
		    new_rlim->rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	/* protect tsk->signal and tsk->sighand from disappearing */
	read_lock(&tasklist_lock);
	if (!tsk->sighand) {
		retval = -ESRCH;
		goto out;
	}

	rlim = tsk->signal->rlim + resource;
	task_lock(tsk->group_leader);
	if (new_rlim) {
		/*
		 * Keep the capable check against init_user_ns until
		 * cgroups can contain all limits.
		 */
		if (new_rlim->rlim_max > rlim->rlim_max &&
		    !capable(CAP_SYS_RESOURCE))
			retval = -EPERM;
		if (!retval)
			retval = security_task_setrlimit(tsk->group_leader,
							 resource, new_rlim);
		if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
			/*
			 * The caller is asking for an immediate RLIMIT_CPU
			 * expiry.  But we use the zero value to mean "it was
			 * never set".  So let's cheat and make it one second
			 * instead.
			 */
			new_rlim->rlim_cur = 1;
		}
	}
	if (!retval) {
		if (old_rlim)
			*old_rlim = *rlim;
		if (new_rlim)
			*rlim = *new_rlim;
	}
	task_unlock(tsk->group_leader);

	/*
	 * RLIMIT_CPU handling.  Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is
	 * a very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it.
	 */
	if (!retval && new_rlim && resource == RLIMIT_CPU &&
	    new_rlim->rlim_cur != RLIM_INFINITY)
		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
out:
	read_unlock(&tasklist_lock);
	return retval;
}

/* rcu lock must be held */
static int check_prlimit_permission(struct task_struct *task)
{
	const struct cred *cred = current_cred(), *tcred;

	if (current == task)
		return 0;

	tcred = __task_cred(task);
	if (uid_eq(cred->uid, tcred->euid) &&
	    uid_eq(cred->uid, tcred->suid) &&
	    uid_eq(cred->uid, tcred->uid)  &&
	    gid_eq(cred->gid, tcred->egid) &&
	    gid_eq(cred->gid, tcred->sgid) &&
	    gid_eq(cred->gid, tcred->gid))
		return 0;
	if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
		return 0;

	return -EPERM;
}

SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
		const struct rlimit64 __user *, new_rlim,
		struct rlimit64 __user *, old_rlim)
{
	struct rlimit64 old64, new64;
	struct rlimit old, new;
	struct task_struct *tsk;
	int ret;

	if (new_rlim) {
		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
			return -EFAULT;
		rlim64_to_rlim(&new64, &new);
	}

	rcu_read_lock();
	tsk = pid ? find_task_by_vpid(pid) : current;
	if (!tsk) {
		rcu_read_unlock();
		return -ESRCH;
	}
	ret = check_prlimit_permission(tsk);
	if (ret) {
		rcu_read_unlock();
		return ret;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
			old_rlim ? &old : NULL);

	if (!ret && old_rlim) {
		rlim_to_rlim64(&old, &old64);
		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
			ret = -EFAULT;
	}

	put_task_struct(tsk);
	return ret;
}

SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit new_rlim;

	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	return do_prlimit(current, resource, &new_rlim, NULL);
}
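/*
 * Example: glibc exposes this syscall as prlimit(2).  Raising the file
 * descriptor limit of pid 1234 (needs matching credentials or
 * CAP_SYS_RESOURCE, per check_prlimit_permission() above):
 *
 *	struct rlimit new = { .rlim_cur = 4096, .rlim_max = 4096 };
 *	prlimit(1234, RLIMIT_NOFILE, &new, NULL);
 */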
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases: current multithreaded, non-current single-threaded,
 * and non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current
 * and single threaded, as no one else can take our signal_struct away,
 * no one else can reap the children to update signal->c* counters, and
 * no one else can race with the signal-> fields.  If we do not take any
 * lock, the signal-> fields could be read out of order while another
 * thread was just exiting.  So we should place a read memory barrier when
 * we avoid the lock.  On the writer side, a write memory barrier is
 * implied in __exit_signal, as __exit_signal releases the siglock
 * spinlock after updating the signal-> fields.  But we don't do this yet
 * to keep things simple.
 */

static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);
}

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t tgutime, tgstime, utime, stime;
	unsigned long maxrss = 0;

	memset((char *) r, 0, sizeof *r);
	utime = stime = 0;

	if (who == RUSAGE_THREAD) {
		task_cputime_adjusted(current, &utime, &stime);
		accumulate_thread_rusage(p, r);
		maxrss = p->signal->maxrss;
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;
		maxrss = p->signal->cmaxrss;

		if (who == RUSAGE_CHILDREN)
			break;

	case RUSAGE_SELF:
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		utime += tgutime;
		stime += tgstime;
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		if (maxrss < p->signal->maxrss)
			maxrss = p->signal->maxrss;
		t = p;
		do {
			accumulate_thread_rusage(t, r);
			t = next_thread(t);
		} while (t != p);
		break;

	default:
		BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);

	if (who != RUSAGE_CHILDREN) {
		struct mm_struct *mm = get_task_mm(p);
		if (mm) {
			setmax_mm_hiwater_rss(&maxrss, mm);
			mmput(mm);
		}
	}
	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
}
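/*
 * Note: in the switch above, RUSAGE_BOTH deliberately falls through from
 * the RUSAGE_CHILDREN accumulation into RUSAGE_SELF, so it reports the
 * sum of both.
 */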
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;
	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;
	return getrusage(current, who, ru);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	k_getrusage(current, who, &r);
	return put_compat_rusage(&r, ru);
}
#endif

SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
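/*
 * Example: permission bits requested at file creation are masked as
 * mode & ~umask, so with umask 022 an open(..., O_CREAT, 0666) yields a
 * 0644 file.  The call cannot fail and returns the previous mask.
 */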
static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
	struct fd exe;
	struct inode *inode;
	int err;

	exe = fdget(fd);
	if (!exe.file)
		return -EBADF;

	inode = file_inode(exe.file);

	/*
	 * Because the original mm->exe_file points to an executable file,
	 * make sure that this one is executable as well, to avoid breaking
	 * the overall picture.
	 */
	err = -EACCES;
	if (!S_ISREG(inode->i_mode)	||
	    exe.file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	err = inode_permission(inode, MAY_EXEC);
	if (err)
		goto exit;

	down_write(&mm->mmap_sem);

	/*
	 * Forbid mm->exe_file change if old file still mapped.
	 */
	err = -EBUSY;
	if (mm->exe_file) {
		struct vm_area_struct *vma;

		for (vma = mm->mmap; vma; vma = vma->vm_next)
			if (vma->vm_file &&
			    path_equal(&vma->vm_file->f_path,
				       &mm->exe_file->f_path))
				goto exit_unlock;
	}

	/*
	 * The symlink can be changed only once, just to disallow arbitrary
	 * transitions malicious software might bring in.  This means one
	 * could make a snapshot over all processes running and monitor
	 * /proc/pid/exe changes to notice unusual activity if needed.
	 */
	err = -EPERM;
	if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
		goto exit_unlock;

	err = 0;
	set_mm_exe_file(mm, exe.file);	/* this grabs a reference to exe.file */
exit_unlock:
	up_write(&mm->mmap_sem);

exit:
	fdput(exe);
	return err;
}

static int prctl_set_mm(int opt, unsigned long addr,
			unsigned long arg4, unsigned long arg5)
{
	unsigned long rlim = rlimit(RLIMIT_DATA);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int error;

	if (arg5 || (arg4 && opt != PR_SET_MM_AUXV))
		return -EINVAL;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	if (opt == PR_SET_MM_EXE_FILE)
		return prctl_set_mm_exe_file(mm, (unsigned int)addr);

	if (addr >= TASK_SIZE || addr < mmap_min_addr)
		return -EINVAL;

	error = -EINVAL;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);

	switch (opt) {
	case PR_SET_MM_START_CODE:
		mm->start_code = addr;
		break;
	case PR_SET_MM_END_CODE:
		mm->end_code = addr;
		break;
	case PR_SET_MM_START_DATA:
		mm->start_data = addr;
		break;
	case PR_SET_MM_END_DATA:
		mm->end_data = addr;
		break;

	case PR_SET_MM_START_BRK:
		if (addr <= mm->end_data)
			goto out;

		if (rlim < RLIM_INFINITY &&
		    (mm->brk - addr) +
		    (mm->end_data - mm->start_data) > rlim)
			goto out;

		mm->start_brk = addr;
		break;

	case PR_SET_MM_BRK:
		if (addr <= mm->end_data)
			goto out;

		if (rlim < RLIM_INFINITY &&
		    (addr - mm->start_brk) +
		    (mm->end_data - mm->start_data) > rlim)
			goto out;

		mm->brk = addr;
		break;

	/*
	 * If command line arguments and environment
	 * are placed somewhere else on the stack, we can
	 * set them up here: ARG_START/END for the command
	 * line arguments and ENV_START/END for the
	 * environment.
	 */
	case PR_SET_MM_START_STACK:
	case PR_SET_MM_ARG_START:
	case PR_SET_MM_ARG_END:
	case PR_SET_MM_ENV_START:
	case PR_SET_MM_ENV_END:
		if (!vma) {
			error = -EFAULT;
			goto out;
		}
		if (opt == PR_SET_MM_START_STACK)
			mm->start_stack = addr;
		else if (opt == PR_SET_MM_ARG_START)
			mm->arg_start = addr;
		else if (opt == PR_SET_MM_ARG_END)
			mm->arg_end = addr;
		else if (opt == PR_SET_MM_ENV_START)
			mm->env_start = addr;
		else if (opt == PR_SET_MM_ENV_END)
			mm->env_end = addr;
		break;

	/*
	 * This doesn't move the auxiliary vector itself
	 * since it's pinned to mm_struct, but it allows
	 * the caller to fill the vector with new values.
	 * It's up to the caller to provide sane values here,
	 * otherwise userspace tools which use this vector
	 * might be unhappy.
	 */
	case PR_SET_MM_AUXV: {
		unsigned long user_auxv[AT_VECTOR_SIZE];

		if (arg4 > sizeof(user_auxv))
			goto out;
		up_read(&mm->mmap_sem);

		if (copy_from_user(user_auxv, (const void __user *)addr, arg4))
			return -EFAULT;

		/* Make sure the last entry is always AT_NULL */
		user_auxv[AT_VECTOR_SIZE - 2] = 0;
		user_auxv[AT_VECTOR_SIZE - 1] = 0;

		BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));

		task_lock(current);
		memcpy(mm->saved_auxv, user_auxv, arg4);
		task_unlock(current);

		return 0;
	}
	default:
		goto out;
	}

	error = 0;
out:
	up_read(&mm->mmap_sem);
	return error;
}
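/*
 * Note: PR_SET_MM is mainly consumed by checkpoint/restore tools such as
 * CRIU, which have to reconstruct these mm fields when restoring a
 * process image; hence the blanket CAP_SYS_RESOURCE requirement.
 */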
#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return put_user(me->clear_child_tid, tid_addr);
}
#else
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return -EINVAL;
}
#endif

SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)
		return error;

	error = 0;
	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;
	case PR_SET_NAME:
		comm[sizeof(me->comm) - 1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		proc_comm_connector(me);
		break;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
			return -EFAULT;
		break;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2, (char __user *)arg3);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_TASK_PERF_EVENTS_DISABLE:
		error = perf_event_task_disable();
		break;
	case PR_TASK_PERF_EVENTS_ENABLE:
		error = perf_event_task_enable();
		break;
	case PR_GET_TIMERSLACK:
		error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;
	case PR_MCE_KILL:
		if (arg4 | arg5)
			return -EINVAL;
		switch (arg2) {
		case PR_MCE_KILL_CLEAR:
			if (arg3 != 0)
				return -EINVAL;
			current->flags &= ~PF_MCE_PROCESS;
			break;
		case PR_MCE_KILL_SET:
			current->flags |= PF_MCE_PROCESS;
			if (arg3 == PR_MCE_KILL_EARLY)
				current->flags |= PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_LATE)
				current->flags &= ~PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_DEFAULT)
				current->flags &=
						~(PF_MCE_EARLY|PF_MCE_PROCESS);
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_MCE_KILL_GET:
		if (arg2 | arg3 | arg4 | arg5)
			return -EINVAL;
		if (current->flags & PF_MCE_PROCESS)
			error = (current->flags & PF_MCE_EARLY) ?
				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
		else
			error = PR_MCE_KILL_DEFAULT;
		break;
	case PR_SET_MM:
		error = prctl_set_mm(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_TID_ADDRESS:
		error = prctl_get_tid_address(me, (int __user **)arg2);
		break;
	case PR_SET_CHILD_SUBREAPER:
		me->signal->is_child_subreaper = !!arg2;
		break;
	case PR_GET_CHILD_SUBREAPER:
		error = put_user(me->signal->is_child_subreaper,
				 (int __user *)arg2);
		break;
	case PR_SET_NO_NEW_PRIVS:
		if (arg2 != 1 || arg3 || arg4 || arg5)
			return -EINVAL;

		current->no_new_privs = 1;
		break;
	case PR_GET_NO_NEW_PRIVS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		return current->no_new_privs ? 1 : 0;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}
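/*
 * Example: PR_SET_NAME / PR_GET_NAME operate on the 16-byte comm field
 * (at most 15 characters plus the terminating NUL):
 *
 *	prctl(PR_SET_NAME, "worker-1", 0, 0, 0);
 *	char name[16];
 *	prctl(PR_GET_NAME, name, 0, 0, 0);
 */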
SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
		struct getcpu_cache __user *, unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();
	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}
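/*
 * Note: the third argument is a leftover getcpu_cache pointer that the
 * kernel has long ignored (hence the name "unused"); userspace normally
 * reaches this syscall through glibc's sched_getcpu().
 */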
/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
static int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec tp;

	memset(info, 0, sizeof(struct sysinfo));

	get_monotonic_boottime(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(u32)-sizeof(int)];
};

COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
{
	struct sysinfo s;

	do_sysinfo(&s);

	/* Check to see if any memory value is too large for 32-bit and scale
	 * down if needed
	 */
	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
	    __put_user(s.uptime, &info->uptime) ||
	    __put_user(s.loads[0], &info->loads[0]) ||
	    __put_user(s.loads[1], &info->loads[1]) ||
	    __put_user(s.loads[2], &info->loads[2]) ||
	    __put_user(s.totalram, &info->totalram) ||
	    __put_user(s.freeram, &info->freeram) ||
	    __put_user(s.sharedram, &info->sharedram) ||
	    __put_user(s.bufferram, &info->bufferram) ||
	    __put_user(s.totalswap, &info->totalswap) ||
	    __put_user(s.freeswap, &info->freeswap) ||
	    __put_user(s.procs, &info->procs) ||
	    __put_user(s.totalhigh, &info->totalhigh) ||
	    __put_user(s.freehigh, &info->freehigh) ||
	    __put_user(s.mem_unit, &info->mem_unit))
		return -EFAULT;

	return 0;
}
#endif /* CONFIG_COMPAT */
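/*
 * Example: userspace must scale the counters by mem_unit to get bytes,
 * since on large-memory 32-bit systems the values are shifted down:
 *
 *	struct sysinfo si;
 *	sysinfo(&si);
 *	unsigned long long bytes =
 *		(unsigned long long)si.totalram * si.mem_unit;
 */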