/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>

#include <linux/compat.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif

/*
 * This is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past.
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

#ifdef CONFIG_UID16
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
#endif

/*
 * The same as above, but for filesystems which can only store a 16-bit
 * UID and GID. As such, this is needed on all architectures.
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * This indicates whether you can reboot with ctrl-alt-del: the default is yes.
 */

int C_A_D = 1;
int cad_pid = 1;

/*
 * Notifier list for kernel code which wants to be called
 * at shutdown. This is used to stop any idling DMA operations
 * and the like.
 */

static struct notifier_block *reboot_notifier_list;
static DEFINE_RWLOCK(notifier_lock);

/**
 * notifier_chain_register - Add notifier to a notifier chain
 * @list: Pointer to root list pointer
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a notifier chain. The chain is kept sorted
 * by descending priority.
 *
 * Currently always returns zero.
 */

int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
{
	write_lock(&notifier_lock);
	while (*list) {
		if (n->priority > (*list)->priority)
			break;
		list = &((*list)->next);
	}
	n->next = *list;
	*list = n;
	write_unlock(&notifier_lock);
	return 0;
}

EXPORT_SYMBOL(notifier_chain_register);
/**
 * notifier_chain_unregister - Remove notifier from a notifier chain
 * @nl: Pointer to root list pointer
 * @n: Entry to remove from the notifier chain
 *
 * Removes a notifier from a notifier chain.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */

int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
{
	write_lock(&notifier_lock);
	while ((*nl) != NULL) {
		if ((*nl) == n) {
			*nl = n->next;
			write_unlock(&notifier_lock);
			return 0;
		}
		nl = &((*nl)->next);
	}
	write_unlock(&notifier_lock);
	return -ENOENT;
}

EXPORT_SYMBOL(notifier_chain_unregister);

/**
 * notifier_call_chain - Call functions in a notifier chain
 * @n: Pointer to root pointer of notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK, then notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb = *n;

	while (nb) {
		ret = nb->notifier_call(nb, val, v);
		if (ret & NOTIFY_STOP_MASK)
			return ret;
		nb = nb->next;
	}
	return ret;
}

EXPORT_SYMBOL(notifier_call_chain);

/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as notifier_chain_register
 * always returns zero.
 */

int register_reboot_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);

/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot
 * notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */

int unregister_reboot_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);
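/*
 * Illustrative sketch (not part of this file): how a driver might use the
 * two helpers above. The names quiesce_hw, my_reboot_event and my_nb are
 * hypothetical.
 *
 *	static int my_reboot_event(struct notifier_block *nb,
 *				   unsigned long code, void *cmd)
 *	{
 *		// code is SYS_RESTART, SYS_HALT or SYS_POWER_OFF; cmd is
 *		// the command string for LINUX_REBOOT_CMD_RESTART2
 *		quiesce_hw();
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call	= my_reboot_event,
 *		.priority	= 0,	// higher priority runs earlier
 *	};
 *
 *	register_reboot_notifier(&my_nb);	// at init
 *	unregister_reboot_notifier(&my_nb);	// at teardown
 */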
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (p->uid != current->euid &&
	    p->euid != current->euid && !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

asmlinkage long sys_setpriority(int which, int who, int niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	int error = -EINVAL;

	if (which > 2 || which < 0)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (!who)
			who = current->pid;
		p = find_task_by_pid(who);
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (!who)
			who = process_group(current);
		do_each_task_pid(who, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_task_pid(who, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		user = current->user;
		if (!who)
			who = current->uid;
		else if ((who != current->uid) && !(user = find_user(who)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p)
			if (p->uid == who)
				error = set_one_prio(p, niceval, error);
		while_each_thread(g, p);
		if (who != current->uid)
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return error;
}
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (i.e. it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	long niceval, retval = -ESRCH;

	if (which > 2 || which < 0)
		return -EINVAL;

	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (!who)
			who = current->pid;
		p = find_task_by_pid(who);
		if (p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (!who)
			who = process_group(current);
		do_each_task_pid(who, PIDTYPE_PGID, p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		} while_each_task_pid(who, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		user = current->user;
		if (!who)
			who = current->uid;
		else if ((who != current->uid) && !(user = find_user(who)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p)
			if (p->uid == who) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
		while_each_thread(g, p);
		if (who != current->uid)
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);

	return retval;
}
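/*
 * Illustrative sketch (userspace, not part of this file): a caller using
 * the raw syscall must undo the 20 - nice encoding described above; the
 * glibc getpriority() wrapper performs this conversion itself.
 *
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	if (raw >= 0)
 *		printf("nice %ld\n", 20 - raw);	// 40..1 maps back to -20..19
 */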
/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user *arg)
{
	char buffer[256];

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	lock_kernel();
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
		system_state = SYSTEM_RESTART;
		device_shutdown();
		printk(KERN_EMERG "Restarting system.\n");
		machine_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL);
		system_state = SYSTEM_HALT;
		device_shutdown();
		printk(KERN_EMERG "System halted.\n");
		machine_halt();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
		notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL);
		system_state = SYSTEM_POWER_OFF;
		device_shutdown();
		printk(KERN_EMERG "Power down.\n");
		machine_power_off();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			unlock_kernel();
			return -EFAULT;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		notifier_call_chain(&reboot_notifier_list, SYS_RESTART, buffer);
		system_state = SYSTEM_RESTART;
		device_shutdown();
		printk(KERN_EMERG "Restarting system with command '%s'.\n", buffer);
		machine_restart(buffer);
		break;

#ifdef CONFIG_SOFTWARE_SUSPEND
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		{
			int ret = software_suspend();
			unlock_kernel();
			return ret;
		}
#endif

	default:
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();
	return 0;
}

static void deferred_cad(void *dummy)
{
	notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
	machine_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - i.e. the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad, NULL);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_proc(cad_pid, SIGINT, 1);
}
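/*
 * Illustrative sketch (userspace, not part of this file): invoking
 * sys_reboot() via the raw syscall with both magic numbers. The caller
 * needs CAP_SYS_BOOT and should sync() first, since the comment above
 * notes that reboot itself does not.
 *
 *	#include <linux/reboot.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	sync();
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_RESTART, NULL);
 */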
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa. (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit of a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD. A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races; the GIDs are checked only by filesystem
 * operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
	int old_rgid = current->gid;
	int old_egid = current->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;
	int retval;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (current->egid == rgid) ||
		    capable(CAP_SETGID))
			new_rgid = rgid;
		else
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (current->egid == egid) ||
		    (current->sgid == egid) ||
		    capable(CAP_SETGID))
			new_egid = egid;
		else
			return -EPERM;
	}
	if (new_egid != old_egid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		current->sgid = new_egid;
	current->fsgid = new_egid;
	current->egid = new_egid;
	current->gid = new_rgid;
	key_fsgid_changed(current);
	return 0;
}

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
	int old_egid = current->egid;
	int retval;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	if (capable(CAP_SETGID)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->gid = current->egid = current->sgid = current->fsgid = gid;
	} else if ((gid == current->gid) || (gid == current->sgid)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = current->fsgid = gid;
	} else
		return -EPERM;

	key_fsgid_changed(current);
	return 0;
}
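/*
 * Illustrative sketch (userspace, not part of this file): the complete
 * privilege drop the comment above sys_setregid() describes. Setting the
 * real gid forces the saved gid to follow the new effective gid, so a
 * setgid binary is left with nothing to switch back to.
 *
 *	#include <unistd.h>
 *
 *	gid_t rgid = getgid();		// invoking user's gid
 *	if (setregid(rgid, rgid) < 0)	// real, effective (and saved) all drop
 *		_exit(1);
 */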
static int set_user(uid_t new_ruid, int dumpclear)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new_ruid);
	if (!new_user)
		return -EAGAIN;

	if (atomic_read(&new_user->processes) >=
	    current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
	    new_user != &root_user) {
		free_uid(new_user);
		return -EAGAIN;
	}

	switch_uid(new_user);

	if (dumpclear) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->uid = new_ruid;
	return 0;
}

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa. (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit of a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD. A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;
	int retval;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	new_ruid = old_ruid = current->uid;
	new_euid = old_euid = current->euid;
	old_suid = current->suid;

	if (ruid != (uid_t) -1) {
		new_ruid = ruid;
		if ((old_ruid != ruid) &&
		    (current->euid != ruid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (euid != (uid_t) -1) {
		new_euid = euid;
		if ((old_ruid != euid) &&
		    (current->euid != euid) &&
		    (current->suid != euid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
		return -EAGAIN;

	if (new_euid != old_euid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		current->suid = current->euid;
	current->fsuid = current->euid;

	key_fsuid_changed(current);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}

/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_IDS is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too. If you don't like this, blame the bright people
 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
	int old_euid = current->euid;
	int old_ruid, old_suid, new_ruid, new_suid;
	int retval;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	old_ruid = new_ruid = current->uid;
	old_suid = current->suid;
	new_suid = old_suid;

	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
			return -EAGAIN;
		new_suid = uid;
	} else if ((uid != current->uid) && (uid != new_suid))
		return -EPERM;

	if (old_euid != uid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	key_fsuid_changed(current);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}
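/*
 * Illustrative sketch (userspace, not part of this file): the BSD-style
 * temporary drop mentioned in the comment above - a setuid-root program
 * swapping its real and effective uids, then swapping back.
 *
 *	#include <unistd.h>
 *
 *	uid_t ruid = getuid(), euid = geteuid();
 *	setreuid(euid, ruid);	// unprivileged: euid = invoking user
 *	// ... untrusted work ...
 *	setreuid(ruid, euid);	// regain: real uid 0 still permits euid 0
 */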
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid. This allows you to implement the 4.4BSD-compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;
	int retval;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
			return -EPERM;
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
			return -EPERM;
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
			return -EPERM;
	}
	if (ruid != (uid_t) -1) {
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
			return -EAGAIN;
	}
	if (euid != (uid_t) -1) {
		if (euid != current->euid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->euid = euid;
	}
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	key_fsuid_changed(current);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}

asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
	int retval;

	if (!(retval = put_user(current->uid, ruid)) &&
	    !(retval = put_user(current->euid, euid)))
		retval = put_user(current->suid, suid);

	return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	int retval;

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
		    (rgid != current->egid) && (rgid != current->sgid))
			return -EPERM;
		if ((egid != (gid_t) -1) && (egid != current->gid) &&
		    (egid != current->egid) && (egid != current->sgid))
			return -EPERM;
		if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
		    (sgid != current->egid) && (sgid != current->sgid))
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if (egid != current->egid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = egid;
	}
	current->fsgid = current->egid;
	if (rgid != (gid_t) -1)
		current->gid = rgid;
	if (sgid != (gid_t) -1)
		current->sgid = sgid;

	key_fsgid_changed(current);
	return 0;
}

asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
	int retval;

	if (!(retval = put_user(current->gid, rgid)) &&
	    !(retval = put_user(current->egid, egid)))
		retval = put_user(current->sgid, sgid);

	return retval;
}
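/*
 * Illustrative sketch (userspace, not part of this file): permanently
 * dropping to the invoking user with setresuid() and verifying all three
 * ids afterwards. getresuid()/setresuid() need _GNU_SOURCE with glibc.
 *
 *	#include <unistd.h>
 *
 *	uid_t u = getuid(), r, e, s;
 *	if (setresuid(u, u, u) < 0)
 *		_exit(1);
 *	if (getresuid(&r, &e, &s) < 0 || r != u || e != u || s != u)
 *		_exit(1);	// the drop did not fully take effect
 */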
/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
asmlinkage long sys_setfsuid(uid_t uid)
{
	int old_fsuid;

	old_fsuid = current->fsuid;
	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
		return old_fsuid;

	if (uid == current->uid || uid == current->euid ||
	    uid == current->suid || uid == current->fsuid ||
	    capable(CAP_SETUID)) {
		if (uid != old_fsuid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsuid = uid;
	}

	key_fsuid_changed(current);

	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);

	return old_fsuid;
}

/*
 * Samma på svenska.. ("Same thing in Swedish" - the gid counterpart of
 * setfsuid() above.)
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
	int old_fsgid;

	old_fsgid = current->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
		return old_fsgid;

	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid ||
	    capable(CAP_SETGID)) {
		if (gid != old_fsgid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsgid = gid;
		key_fsgid_changed(current);
	}
	return old_fsgid;
}

asmlinkage long sys_times(struct tms __user *tbuf)
{
	/*
	 * In the SMP world we might just be unlucky and have one of
	 * the times increment as we use it. Since the value is an
	 * atomically safe type this is just fine. Conceptually it's
	 * as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct tms tmp;
		cputime_t utime, stime, cutime, cstime;

#ifdef CONFIG_SMP
		if (thread_group_empty(current)) {
			/*
			 * Single-thread case without the use of any locks.
			 *
			 * We may race with release_task if two threads are
			 * executing. However, release_task first adds up the
			 * counters (__exit_signal) before removing the task
			 * from the process tasklist (__unhash_process).
			 * __exit_signal also acquires and releases the
			 * siglock which results in the proper memory ordering
			 * so that the list modifications are always visible
			 * after the counters have been updated.
			 *
			 * If the counters have been updated by the second thread
			 * but the thread has not yet been removed from the list
			 * then the other branch will be executing which will
			 * block on tasklist_lock until the exit handling of the
			 * other task is finished.
			 *
			 * This also implies that the sighand->siglock cannot
			 * be held by another processor. So we can also
			 * skip acquiring that lock.
			 */
			utime = cputime_add(current->signal->utime, current->utime);
			stime = cputime_add(current->signal->stime, current->stime);
			cutime = current->signal->cutime;
			cstime = current->signal->cstime;
		} else
#endif
		{
			/* Process with multiple threads */
			struct task_struct *tsk = current;
			struct task_struct *t;

			read_lock(&tasklist_lock);
			utime = tsk->signal->utime;
			stime = tsk->signal->stime;
			t = tsk;
			do {
				utime = cputime_add(utime, t->utime);
				stime = cputime_add(stime, t->stime);
				t = next_thread(t);
			} while (t != tsk);

			/*
			 * While we have tasklist_lock read-locked, no dying thread
			 * can be updating current->signal->[us]time. Instead,
			 * we got their counts included in the live thread loop.
			 * However, another thread can come in right now and
			 * do a wait call that updates current->signal->c[us]time.
			 * To make sure we always see that pair updated atomically,
			 * we take the siglock around fetching them.
			 */
			spin_lock_irq(&tsk->sighand->siglock);
			cutime = tsk->signal->cutime;
			cstime = tsk->signal->cstime;
			spin_unlock_irq(&tsk->sighand->siglock);
			read_unlock(&tasklist_lock);
		}
		tmp.tms_utime = cputime_to_clock_t(utime);
		tmp.tms_stime = cputime_to_clock_t(stime);
		tmp.tms_cutime = cputime_to_clock_t(cutime);
		tmp.tms_cstime = cputime_to_clock_t(cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
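/*
 * Illustrative sketch (userspace, not part of this file): consuming what
 * sys_times() returns. Both the return value and the tms fields are in
 * clock ticks; divide by sysconf(_SC_CLK_TCK) to get seconds.
 *
 *	#include <sys/times.h>
 *	#include <unistd.h>
 *
 *	struct tms t;
 *	clock_t elapsed = times(&t);
 *	double user_secs = (double)t.tms_utime / sysconf(_SC_CLK_TCK);
 */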
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another. -TYT, 12/12/91
 *
 * Ouch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */

asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	int err = -EINVAL;

	if (!pid)
		pid = current->pid;
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_pid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (p->parent == current || p->real_parent == current) {
		err = -EPERM;
		if (p->signal->session != current->signal->session)
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != current)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	if (pgid != pid) {
		struct task_struct *p;

		do_each_task_pid(pgid, PIDTYPE_PGID, p) {
			if (p->signal->session == current->signal->session)
				goto ok_pgid;
		} while_each_task_pid(pgid, PIDTYPE_PGID, p);
		goto out;
	}

ok_pgid:
	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (process_group(p) != pgid) {
		detach_pid(p, PIDTYPE_PGID);
		p->signal->pgrp = pgid;
		attach_pid(p, PIDTYPE_PGID, pgid);
	}

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}
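/*
 * Illustrative sketch (userspace, not part of this file): the job-control
 * idiom sys_setpgid() exists for - a shell moving a child into its own
 * process group, making the same call in both parent and child so that
 * whichever runs first wins the race. cmd and argv are hypothetical.
 *
 *	#include <unistd.h>
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		setpgid(0, 0);		// child: new group, pgid = own pid
 *		execvp(cmd, argv);
 *		_exit(127);
 *	}
 *	setpgid(pid, pid);		// parent: duplicate call is harmless
 */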
asmlinkage long sys_getpgid(pid_t pid)
{
	if (!pid) {
		return process_group(current);
	} else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getpgid(p);
			if (!retval)
				retval = process_group(p);
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}

#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
	/* SMP - assuming writes are word atomic this is fine */
	return process_group(current);
}

#endif

asmlinkage long sys_getsid(pid_t pid)
{
	if (!pid) {
		return current->signal->session;
	} else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getsid(p);
			if (!retval)
				retval = p->signal->session;
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}

asmlinkage long sys_setsid(void)
{
	struct pid *pid;
	int err = -EPERM;

	if (!thread_group_leader(current))
		return -EINVAL;

	down(&tty_sem);
	write_lock_irq(&tasklist_lock);

	pid = find_pid(PIDTYPE_PGID, current->pid);
	if (pid)
		goto out;

	current->signal->leader = 1;
	__set_special_pids(current->pid, current->pid);
	current->signal->tty = NULL;
	current->signal->tty_old_pgrp = 0;
	err = process_group(current);
out:
	write_unlock_irq(&tasklist_lock);
	up(&tty_sem);
	return err;
}
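/*
 * Illustrative sketch (userspace, not part of this file): the classic
 * daemonization steps built on setsid(). The fork guarantees the caller
 * is not a process group leader, which sys_setsid() requires - it fails
 * with -EPERM above when a process group with our pid already exists.
 *
 *	#include <unistd.h>
 *
 *	if (fork() > 0)
 *		_exit(0);	// parent exits; child cannot be a group leader
 *	if (setsid() < 0)	// new session, new pgrp, no controlling tty
 *		_exit(1);
 */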
/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL) {
		group_info->blocks[0] = group_info->small_block;
	} else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	while (--i >= 0) {
		free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
	return NULL;
}

EXPORT_SYMBOL(groups_alloc);

void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}

EXPORT_SYMBOL(groups_free);

/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
			  struct group_info *group_info)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist+off, group_info->blocks[i], len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}

/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
			    gid_t __user *grouplist)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist+off, len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}

/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
		; /* nothing */
	stride /= 3;

	while (stride) {
		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int left = base;
			int right = left + stride;
			gid_t tmp = GROUP_AT(group_info, right);

			while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
				GROUP_AT(group_info, right) =
					GROUP_AT(group_info, left);
				right = left;
				left -= stride;
			}
			GROUP_AT(group_info, right) = tmp;
		}
		stride /= 3;
	}
}

/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
{
	int left, right;

	if (!group_info)
		return 0;

	left = 0;
	right = group_info->ngroups;
	while (left < right) {
		int mid = (left+right)/2;
		int cmp = grp - GROUP_AT(group_info, mid);
		if (cmp > 0)
			left = mid + 1;
		else if (cmp < 0)
			right = mid;
		else
			return 1;
	}
	return 0;
}
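/*
 * Worked example for groups_sort() above (illustrative): with ngroups = 20
 * the for loop produces strides 1, 4, 13, 40 and stops at 40; the division
 * by 3 then starts the sort at stride 13, follows with 4, and finishes with
 * an ordinary insertion-sort pass at stride 1. groups_search() relies on
 * that ordering - set_current_groups() below always sorts before
 * publishing a group_info.
 */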
/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
{
	int retval;
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);
	if (retval)
		return retval;

	groups_sort(group_info);
	get_group_info(group_info);

	task_lock(current);
	old_info = current->group_info;
	current->group_info = group_info;
	task_unlock(current);

	put_group_info(old_info);

	return 0;
}

EXPORT_SYMBOL(set_current_groups);

asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
	int i = 0;

	/*
	 * SMP: Nobody else can change our grouplist. Thus we are
	 * safe.
	 */

	if (gidsetsize < 0)
		return -EINVAL;

	/* no need to grab task_lock here; it cannot change */
	get_group_info(current->group_info);
	i = current->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	put_group_info(current->group_info);
	return i;
}

/*
 * SMP: Our groups are copy-on-write. We can set them safely
 * without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}
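/*
 * Illustrative sketch (userspace, not part of this file): the usual
 * two-call pattern for sys_getgroups() - a size query with gidsetsize
 * zero, then the actual fetch.
 *
 *	#include <unistd.h>
 *
 *	int n = getgroups(0, NULL);	// returns ngroups, copies nothing
 *	if (n > 0) {
 *		gid_t list[n];
 *		if (getgroups(n, list) < 0)
 *			_exit(1);
 *	}
 */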
/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->fsgid) {
		get_group_info(current->group_info);
		retval = groups_search(current->group_info, grp);
		put_group_info(current->group_info);
	}
	return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->egid) {
		get_group_info(current->group_info);
		retval = groups_search(current->group_info, grp);
		put_group_info(current->group_info);
	}
	return retval;
}

EXPORT_SYMBOL(in_egroup_p);

DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user *name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, &system_utsname, sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

asmlinkage long sys_sethostname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(system_utsname.nodename, tmp, len);
		system_utsname.nodename[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
	int i, errno;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	i = 1 + strlen(system_utsname.nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, system_utsname.nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname().
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(system_utsname.domainname, tmp, len);
		system_utsname.domainname[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}
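/*
 * Illustrative sketch (userspace, not part of this file): reading the
 * fields guarded by uts_sem back through uname(2), which is also how
 * getdomainname can be emulated per the comment above. The domainname
 * field is a GNU extension (_GNU_SOURCE).
 *
 *	#include <stdio.h>
 *	#include <sys/utsname.h>
 *
 *	struct utsname u;
 *	if (uname(&u) == 0)
 *		printf("%s %s\n", u.nodename, u.domainname);
 */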
asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	else {
		struct rlimit value;
		task_lock(current->group_leader);
		value = current->signal->rlim[resource];
		task_unlock(current->group_leader);
		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
	}
}

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit. Needed for some apps.
 */

asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#endif

asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
		return -EPERM;
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
		return -EPERM;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource == RLIMIT_CPU && new_rlim.rlim_cur != RLIM_INFINITY &&
	    (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
	     new_rlim.rlim_cur <= cputime_to_secs(
		     current->signal->it_prof_expires))) {
		cputime_t cputime = secs_to_cputime(new_rlim.rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF,
				      &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	return 0;
}
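/*
 * Illustrative sketch (userspace, not part of this file): raising a soft
 * limit up to the hard limit - the one kind of increase sys_setrlimit()
 * allows without CAP_SYS_RESOURCE.
 *
 *	#include <sys/resource.h>
 *
 *	struct rlimit rl;
 *	if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
 *		rl.rlim_cur = rl.rlim_max;	// soft -> hard needs no capability
 *		setrlimit(RLIMIT_NOFILE, &rl);
 *	}
 */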
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*. After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this. It will make moving the rest of the information
 * a lot simpler! (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * This expects to be called with tasklist_lock read-locked or better,
 * and the siglock not locked. It may momentarily take the siglock.
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters. But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums. We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 */

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);

	if (unlikely(!p->signal))
		return;

	switch (who) {
	case RUSAGE_CHILDREN:
		spin_lock_irqsave(&p->sighand->siglock, flags);
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
		cputime_to_timeval(utime, &r->ru_utime);
		cputime_to_timeval(stime, &r->ru_stime);
		break;
	case RUSAGE_SELF:
		spin_lock_irqsave(&p->sighand->siglock, flags);
		utime = stime = cputime_zero;
		goto sum_group;
	case RUSAGE_BOTH:
		spin_lock_irqsave(&p->sighand->siglock, flags);
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
	sum_group:
		utime = cputime_add(utime, p->signal->utime);
		stime = cputime_add(stime, p->signal->stime);
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		t = p;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			r->ru_nvcsw += t->nvcsw;
			r->ru_nivcsw += t->nivcsw;
			r->ru_minflt += t->min_flt;
			r->ru_majflt += t->maj_flt;
			t = next_thread(t);
		} while (t != p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
		cputime_to_timeval(utime, &r->ru_utime);
		cputime_to_timeval(stime, &r->ru_stime);
		break;
	default:
		BUG();
	}
}

int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;
	read_lock(&tasklist_lock);
	k_getrusage(p, who, &r);
	read_unlock(&tasklist_lock);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
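/*
 * Illustrative sketch (userspace, not part of this file): sampling the
 * fields k_getrusage() fills in - this implementation populates only the
 * times and the fault/context-switch counters.
 *
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	struct rusage ru;
 *	if (getrusage(RUSAGE_SELF, &ru) == 0)
 *		printf("user %ld.%06lds, minor faults %ld\n",
 *		       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
 *		       ru.ru_minflt);
 */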
asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
		return -EINVAL;
	return getrusage(current, who, ru);
}

asmlinkage long sys_umask(int mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}

asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
{
	long error;
	int sig;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error)
		return error;

	switch (option) {
	case PR_SET_PDEATHSIG:
		sig = arg2;
		if (!valid_signal(sig)) {
			error = -EINVAL;
			break;
		}
		current->pdeath_signal = sig;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(current->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		if (current->mm->dumpable)
			error = 1;
		break;
	case PR_SET_DUMPABLE:
		if (arg2 > 2) {		/* arg2 is unsigned, so no < 0 check */
			error = -EINVAL;
			break;
		}
		current->mm->dumpable = arg2;
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(current, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(current, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(current, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(current, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(current, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(current, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 == PR_TIMING_STATISTICAL)
			error = 0;
		else
			error = -EINVAL;
		break;

	case PR_GET_KEEPCAPS:
		if (current->keep_capabilities)
			error = 1;
		break;
	case PR_SET_KEEPCAPS:
		if (arg2 != 0 && arg2 != 1) {
			error = -EINVAL;
			break;
		}
		current->keep_capabilities = arg2;
		break;
	case PR_SET_NAME: {
		struct task_struct *me = current;
		unsigned char ncomm[sizeof(me->comm)];

		ncomm[sizeof(me->comm)-1] = 0;
		if (strncpy_from_user(ncomm, (char __user *)arg2,
				      sizeof(me->comm)-1) < 0)
			return -EFAULT;
		set_task_comm(me, ncomm);
		return 0;
	}
	case PR_GET_NAME: {
		struct task_struct *me = current;
		unsigned char tcomm[sizeof(me->comm)];

		get_task_comm(tcomm, me);
		if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
			return -EFAULT;
		return 0;
	}
	default:
		error = -EINVAL;
		break;
	}
	return error;
}
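/*
 * Illustrative sketch (userspace, not part of this file): the
 * PR_SET_NAME/PR_GET_NAME pair handled above. Names are truncated to the
 * size of task_struct.comm (16 bytes including the terminating NUL).
 *
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *
 *	char name[16];
 *	prctl(PR_SET_NAME, "worker");
 *	if (prctl(PR_GET_NAME, name) == 0)
 *		printf("%s\n", name);
 */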