/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif

/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

#ifdef CONFIG_UID16
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
#endif

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
int cad_pid = 1;

/*
 * Notifier list for kernel code which wants to be called
 * at shutdown. This is used to stop any idling DMA operations
 * and the like.
 */

static struct notifier_block *reboot_notifier_list;
static DEFINE_RWLOCK(notifier_lock);

/**
 * notifier_chain_register - Add notifier to a notifier chain
 * @list: Pointer to root list pointer
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a notifier chain.
 *
 * Currently always returns zero.
 */

int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
{
	write_lock(&notifier_lock);
	while(*list)
	{
		if(n->priority > (*list)->priority)
			break;
		list= &((*list)->next);
	}
	n->next = *list;
	*list=n;
	write_unlock(&notifier_lock);
	return 0;
}

EXPORT_SYMBOL(notifier_chain_register);

/**
 * notifier_chain_unregister - Remove notifier from a notifier chain
 * @nl: Pointer to root list pointer
 * @n: New entry in notifier chain
 *
 * Removes a notifier from a notifier chain.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */

int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
{
	write_lock(&notifier_lock);
	while((*nl)!=NULL)
	{
		if((*nl)==n)
		{
			*nl=n->next;
			write_unlock(&notifier_lock);
			return 0;
		}
		nl=&((*nl)->next);
	}
	write_unlock(&notifier_lock);
	return -ENOENT;
}

EXPORT_SYMBOL(notifier_chain_unregister);

/**
 * notifier_call_chain - Call functions in a notifier chain
 * @n: Pointer to root pointer of notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.
 *
 * If the return value of the notifier can be and'd
 * with %NOTIFY_STOP_MASK, then notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise, the return value is the return value
 * of the last notifier function called.
 */

int __kprobes notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
{
	int ret=NOTIFY_DONE;
	struct notifier_block *nb = *n;

	while(nb)
	{
		ret=nb->notifier_call(nb,val,v);
		if(ret&NOTIFY_STOP_MASK)
		{
			return ret;
		}
		nb=nb->next;
	}
	return ret;
}

EXPORT_SYMBOL(notifier_call_chain);

/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as notifier_chain_register
 * always returns zero.
 */

int register_reboot_notifier(struct notifier_block * nb)
{
	return notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);

/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot
 * notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */

int unregister_reboot_notifier(struct notifier_block * nb)
{
	return notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);

#ifndef CONFIG_SECURITY
int capable(int cap)
{
	if (cap_raised(current->cap_effective, cap)) {
		current->flags |= PF_SUPERPRIV;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(capable);
#endif

static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (p->uid != current->euid &&
		p->euid != current->euid && !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

asmlinkage long sys_setpriority(int which, int who, int niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	int error = -EINVAL;

	if (which > 2 || which < 0)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (!who)
				who = current->pid;
			p = find_task_by_pid(who);
			if (p)
				error = set_one_prio(p, niceval, error);
			break;
		case PRIO_PGRP:
			if (!who)
				who = process_group(current);
			do_each_task_pid(who, PIDTYPE_PGID, p) {
				error = set_one_prio(p, niceval, error);
			} while_each_task_pid(who, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			user = current->user;
			if (!who)
				who = current->uid;
			else
				if ((who != current->uid) && !(user = find_user(who)))
					goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p)
				if (p->uid == who)
					error = set_one_prio(p, niceval, error);
			while_each_thread(g, p);
			if (who != current->uid)
				free_uid(user);		/* For find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return error;
}

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	long niceval, retval = -ESRCH;

	if (which > 2 || which < 0)
		return -EINVAL;

	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (!who)
				who = current->pid;
			p = find_task_by_pid(who);
			if (p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
			break;
		case PRIO_PGRP:
			if (!who)
				who = process_group(current);
			do_each_task_pid(who, PIDTYPE_PGID, p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			} while_each_task_pid(who, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			user = current->user;
			if (!who)
				who = current->uid;
			else
				if ((who != current->uid) && !(user = find_user(who)))
					goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p)
				if (p->uid == who) {
					niceval = 20 - task_nice(p);
					if (niceval > retval)
						retval = niceval;
				}
			while_each_thread(g, p);
			if (who != current->uid)
				free_uid(user);		/* for find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);

	return retval;
}

/**
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks
 * reboot the system.  This is called when we know we are in
 * trouble so this is our best effort to reboot.  This is
 * safe to call in interrupt context.
 */
void emergency_restart(void)
{
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);

void kernel_restart_prepare(char *cmd)
{
	notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	device_shutdown();
}

/**
 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart
 *	 or %NULL
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	if (!cmd) {
		printk(KERN_EMERG "Restarting system.\n");
	} else {
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	}
	printk(".\n");
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);

/**
 * kernel_kexec - reboot the system
 *
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
	struct kimage *image;
	image = xchg(&kexec_image, NULL);
	if (!image) {
		return;
	}
	kernel_restart_prepare(NULL);
	printk(KERN_EMERG "Starting new kernel\n");
	machine_shutdown();
	machine_kexec(image);
#endif
}
EXPORT_SYMBOL_GPL(kernel_kexec);

void kernel_shutdown_prepare(enum system_states state)
{
	notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
	system_state = state;
	device_shutdown();
}
/**
 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	printk(KERN_EMERG "System halted.\n");
	machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);

/**
 * kernel_power_off - power_off the system
 *
 * Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	printk(KERN_EMERG "Power down.\n");
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
{
	char buffer[256];

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	lock_kernel();
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			unlock_kernel();
			return -EFAULT;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

	case LINUX_REBOOT_CMD_KEXEC:
		kernel_kexec();
		unlock_kernel();
		return -EINVAL;

#ifdef CONFIG_SOFTWARE_SUSPEND
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		{
			int ret = software_suspend();
			unlock_kernel();
			return ret;
		}
#endif

	default:
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();
	return 0;
}

static void deferred_cad(void *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad, NULL);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_proc(cad_pid, SIGINT, 1);
}


/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are not races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
	int old_rgid = current->gid;
	int old_egid = current->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;
	int retval;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (current->egid==rgid) ||
		    capable(CAP_SETGID))
			new_rgid = rgid;
		else
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (current->egid == egid) ||
		    (current->sgid == egid) ||
		    capable(CAP_SETGID))
			new_egid = egid;
		else {
			return -EPERM;
		}
	}
	if (new_egid != old_egid)
	{
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		current->sgid = new_egid;
	current->fsgid = new_egid;
	current->egid = new_egid;
	current->gid = new_rgid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
	int old_egid = current->egid;
	int retval;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	if (capable(CAP_SETGID))
	{
		if(old_egid != gid)
		{
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->gid = current->egid = current->sgid = current->fsgid = gid;
	}
	else if ((gid == current->gid) || (gid == current->sgid))
	{
		if(old_egid != gid)
		{
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = current->fsgid = gid;
	}
	else
		return -EPERM;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}

static int set_user(uid_t new_ruid, int dumpclear)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new_ruid);
	if (!new_user)
		return -EAGAIN;

	if (atomic_read(&new_user->processes) >=
			current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
			new_user != &root_user) {
		free_uid(new_user);
		return -EAGAIN;
	}

	switch_uid(new_user);

	if(dumpclear)
	{
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->uid = new_ruid;
	return 0;
}

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;
	int retval;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	new_ruid = old_ruid = current->uid;
	new_euid = old_euid = current->euid;
	old_suid = current->suid;

	if (ruid != (uid_t) -1) {
		new_ruid = ruid;
		if ((old_ruid != ruid) &&
		    (current->euid != ruid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (euid != (uid_t) -1) {
		new_euid = euid;
		if ((old_ruid != euid) &&
		    (current->euid != euid) &&
		    (current->suid != euid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
		return -EAGAIN;

	if (new_euid != old_euid)
	{
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		current->suid = current->euid;
	current->fsuid = current->euid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}



/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
	int old_euid = current->euid;
	int old_ruid, old_suid, new_ruid, new_suid;
	int retval;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	old_ruid = new_ruid = current->uid;
	old_suid = current->suid;
	new_suid = old_suid;

	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
			return -EAGAIN;
		new_suid = uid;
	} else if ((uid != current->uid) && (uid != new_suid))
		return -EPERM;

	if (old_euid != uid)
	{
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}


/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;
	int retval;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
			return -EPERM;
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
			return -EPERM;
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
			return -EPERM;
	}
	if (ruid != (uid_t) -1) {
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
			return -EAGAIN;
	}
	if (euid != (uid_t) -1) {
		if (euid != current->euid)
		{
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->euid = euid;
	}
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}

asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
	int retval;

	if (!(retval = put_user(current->uid, ruid)) &&
	    !(retval = put_user(current->euid, euid)))
		retval = put_user(current->suid, suid);

	return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	int retval;

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
		    (rgid != current->egid) && (rgid != current->sgid))
			return -EPERM;
		if ((egid != (gid_t) -1) && (egid != current->gid) &&
		    (egid != current->egid) && (egid != current->sgid))
			return -EPERM;
		if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
		    (sgid != current->egid) && (sgid != current->sgid))
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if (egid != current->egid)
		{
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = egid;
	}
	current->fsgid = current->egid;
	if (rgid != (gid_t) -1)
		current->gid = rgid;
	if (sgid != (gid_t) -1)
		current->sgid = sgid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}

asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
	int retval;

	if (!(retval = put_user(current->gid, rgid)) &&
	    !(retval = put_user(current->egid, egid)))
		retval = put_user(current->sgid, sgid);

	return retval;
}


/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
asmlinkage long sys_setfsuid(uid_t uid)
{
	int old_fsuid;

	old_fsuid = current->fsuid;
	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
		return old_fsuid;

	if (uid == current->uid || uid == current->euid ||
	    uid == current->suid || uid == current->fsuid ||
	    capable(CAP_SETUID))
	{
		if (uid != old_fsuid)
		{
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsuid = uid;
	}

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);

	return old_fsuid;
}

/*
 * Samma på svenska.. ("same thing, in Swedish" - the fsgid counterpart
 * of setfsuid() above)
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
	int old_fsgid;

	old_fsgid = current->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
		return old_fsgid;

	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid ||
	    capable(CAP_SETGID))
	{
		if (gid != old_fsgid)
		{
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsgid = gid;
		key_fsgid_changed(current);
		proc_id_connector(current, PROC_EVENT_GID);
	}
	return old_fsgid;
}

asmlinkage long sys_times(struct tms __user * tbuf)
{
	/*
	 * In the SMP world we might just be unlucky and have one of
	 * the times increment as we use it. Since the value is an
	 * atomically safe type this is just fine. Conceptually its
	 * as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct tms tmp;
		cputime_t utime, stime, cutime, cstime;

#ifdef CONFIG_SMP
		if (thread_group_empty(current)) {
			/*
			 * Single thread case without the use of any locks.
			 *
			 * We may race with release_task if two threads are
			 * executing. However, release task first adds up the
			 * counters (__exit_signal) before removing the task
			 * from the process tasklist (__unhash_process).
			 * __exit_signal also acquires and releases the
			 * siglock which results in the proper memory ordering
			 * so that the list modifications are always visible
			 * after the counters have been updated.
			 *
			 * If the counters have been updated by the second thread
			 * but the thread has not yet been removed from the list
			 * then the other branch will be executing which will
			 * block on tasklist_lock until the exit handling of the
			 * other task is finished.
			 *
			 * This also implies that the sighand->siglock cannot
			 * be held by another processor. So we can also
			 * skip acquiring that lock.
			 */
			utime = cputime_add(current->signal->utime, current->utime);
			stime = cputime_add(current->signal->stime, current->stime);
			cutime = current->signal->cutime;
			cstime = current->signal->cstime;
		} else
#endif
		{

			/* Process with multiple threads */
			struct task_struct *tsk = current;
			struct task_struct *t;

			read_lock(&tasklist_lock);
			utime = tsk->signal->utime;
			stime = tsk->signal->stime;
			t = tsk;
			do {
				utime = cputime_add(utime, t->utime);
				stime = cputime_add(stime, t->stime);
				t = next_thread(t);
			} while (t != tsk);

			/*
			 * While we have tasklist_lock read-locked, no dying thread
			 * can be updating current->signal->[us]time.  Instead,
			 * we got their counts included in the live thread loop.
			 * However, another thread can come in right now and
			 * do a wait call that updates current->signal->c[us]time.
			 * To make sure we always see that pair updated atomically,
			 * we take the siglock around fetching them.
			 */
			spin_lock_irq(&tsk->sighand->siglock);
			cutime = tsk->signal->cutime;
			cstime = tsk->signal->cstime;
			spin_unlock_irq(&tsk->sighand->siglock);
			read_unlock(&tasklist_lock);
		}
		tmp.tms_utime = cputime_to_clock_t(utime);
		tmp.tms_stime = cputime_to_clock_t(stime);
		tmp.tms_cutime = cputime_to_clock_t(cutime);
		tmp.tms_cstime = cputime_to_clock_t(cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}

/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */

asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	int err = -EINVAL;

	if (!pid)
		pid = group_leader->pid;
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_pid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (p->real_parent == group_leader) {
		err = -EPERM;
		if (p->signal->session != group_leader->signal->session)
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	if (pgid != pid) {
		struct task_struct *p;

		do_each_task_pid(pgid, PIDTYPE_PGID, p) {
			if (p->signal->session == group_leader->signal->session)
				goto ok_pgid;
		} while_each_task_pid(pgid, PIDTYPE_PGID, p);
		goto out;
	}

ok_pgid:
	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (process_group(p) != pgid) {
		detach_pid(p, PIDTYPE_PGID);
		p->signal->pgrp = pgid;
		attach_pid(p, PIDTYPE_PGID, pgid);
	}

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}

asmlinkage long sys_getpgid(pid_t pid)
{
	if (!pid) {
		return process_group(current);
	} else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getpgid(p);
			if (!retval)
				retval = process_group(p);
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}

#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
	/* SMP - assuming writes are word atomic this is fine */
	return process_group(current);
}

#endif

asmlinkage long sys_getsid(pid_t pid)
{
	if (!pid) {
		return current->signal->session;
	} else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if(p) {
			retval = security_task_getsid(p);
			if (!retval)
				retval = p->signal->session;
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}

asmlinkage long sys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *pid;
	int err = -EPERM;

	down(&tty_sem);
	write_lock_irq(&tasklist_lock);

	pid = find_pid(PIDTYPE_PGID, group_leader->pid);
	if (pid)
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(group_leader->pid, group_leader->pid);
	group_leader->signal->tty = NULL;
	group_leader->signal->tty_old_pgrp = 0;
	err = process_group(group_leader);
out:
	write_unlock_irq(&tasklist_lock);
	up(&tty_sem);
	return err;
}

/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL) {
		group_info->blocks[0] = group_info->small_block;
	} else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	while (--i >= 0) {
		free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
	return NULL;
}

EXPORT_SYMBOL(groups_alloc);

void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}

EXPORT_SYMBOL(groups_free);

/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
    struct group_info *group_info)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist+off, group_info->blocks[i], len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}

/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
    gid_t __user *grouplist)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist+off, len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}

/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
		; /* nothing */
	stride /= 3;

	while (stride) {
		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int left = base;
			int right = left + stride;
			gid_t tmp = GROUP_AT(group_info, right);

			while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
				GROUP_AT(group_info, right) =
				    GROUP_AT(group_info, left);
				right = left;
				left -= stride;
			}
			GROUP_AT(group_info, right) = tmp;
		}
		stride /= 3;
	}
}

/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
{
	int left, right;

	if (!group_info)
		return 0;

	left = 0;
	right = group_info->ngroups;
	while (left < right) {
		int mid = (left+right)/2;
		int cmp = grp - GROUP_AT(group_info, mid);
		if (cmp > 0)
			left = mid + 1;
		else if (cmp < 0)
			right = mid;
		else
			return 1;
	}
	return 0;
}

/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
{
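	/*
	 * Descriptive note: the incoming group_info is sorted in place so
	 * that groups_search() can binary-search it later, then swapped in
	 * under task_lock(); the old group_info only loses its reference
	 * after the swap, so concurrent readers never see a freed list.
	 */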
	int retval;
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);
	if (retval)
		return retval;

	groups_sort(group_info);
	get_group_info(group_info);

	task_lock(current);
	old_info = current->group_info;
	current->group_info = group_info;
	task_unlock(current);

	put_group_info(old_info);

	return 0;
}

EXPORT_SYMBOL(set_current_groups);

asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
	int i = 0;

	/*
	 * SMP: Nobody else can change our grouplist. Thus we are
	 * safe.
	 */

	if (gidsetsize < 0)
		return -EINVAL;

	/* no need to grab task_lock here; it cannot change */
	get_group_info(current->group_info);
	i = current->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	put_group_info(current->group_info);
	return i;
}

/*
 * SMP: Our groups are copy-on-write. We can set them safely
 * without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}

/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->fsgid) {
		get_group_info(current->group_info);
		retval = groups_search(current->group_info, grp);
		put_group_info(current->group_info);
	}
	return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->egid) {
		get_group_info(current->group_info);
		retval = groups_search(current->group_info, grp);
		put_group_info(current->group_info);
	}
	return retval;
}

EXPORT_SYMBOL(in_egroup_p);

DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user * name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name,&system_utsname,sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

asmlinkage long sys_sethostname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(system_utsname.nodename, tmp, len);
		system_utsname.nodename[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
	int i, errno;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	i = 1 + strlen(system_utsname.nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, system_utsname.nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(system_utsname.domainname, tmp, len);
		system_utsname.domainname[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}

asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	else {
		struct rlimit value;
		task_lock(current->group_leader);
		value = current->signal->rlim[resource];
		task_unlock(current->group_leader);
		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
	}
}

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit. Needed for some apps.
 */

asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if(x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if(x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;
}

#endif

asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if(copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
		return -EPERM;
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
		return -EPERM;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource == RLIMIT_CPU && new_rlim.rlim_cur != RLIM_INFINITY &&
	    (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
	     new_rlim.rlim_cur <= cputime_to_secs(
		     current->signal->it_prof_expires))) {
		cputime_t cputime = secs_to_cputime(new_rlim.rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF,
				      &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	return 0;
}

/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * This expects to be called with tasklist_lock read-locked or better,
 * and the siglock not locked.  It may momentarily take the siglock.
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 */

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);

	if (unlikely(!p->signal))
		return;

	utime = stime = cputime_zero;

	switch (who) {
		case RUSAGE_BOTH:
		case RUSAGE_CHILDREN:
			spin_lock_irqsave(&p->sighand->siglock, flags);
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;
			spin_unlock_irqrestore(&p->sighand->siglock, flags);

			if (who == RUSAGE_CHILDREN)
				break;

		case RUSAGE_SELF:
			utime = cputime_add(utime, p->signal->utime);
			stime = cputime_add(stime, p->signal->stime);
			r->ru_nvcsw += p->signal->nvcsw;
			r->ru_nivcsw += p->signal->nivcsw;
			r->ru_minflt += p->signal->min_flt;
			r->ru_majflt += p->signal->maj_flt;
			t = p;
			do {
				utime = cputime_add(utime, t->utime);
				stime = cputime_add(stime, t->stime);
				r->ru_nvcsw += t->nvcsw;
				r->ru_nivcsw += t->nivcsw;
				r->ru_minflt += t->min_flt;
				r->ru_majflt += t->maj_flt;
				t = next_thread(t);
			} while (t != p);
			break;

		default:
			BUG();
	}

	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
}

int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;
	read_lock(&tasklist_lock);
	k_getrusage(p, who, &r);
	read_unlock(&tasklist_lock);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
		return -EINVAL;
	return getrusage(current, who, ru);
}

asmlinkage long sys_umask(int mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}

asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
{
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error)
		return error;

	switch (option) {
		case PR_SET_PDEATHSIG:
			if (!valid_signal(arg2)) {
				error = -EINVAL;
				break;
			}
			current->pdeath_signal = arg2;
			break;
		case PR_GET_PDEATHSIG:
			error = put_user(current->pdeath_signal, (int __user *)arg2);
			break;
		case PR_GET_DUMPABLE:
			error = current->mm->dumpable;
			break;
		case PR_SET_DUMPABLE:
			if (arg2 < 0 || arg2 > 2) {
				error = -EINVAL;
				break;
			}
			current->mm->dumpable = arg2;
			break;

		case PR_SET_UNALIGN:
			error = SET_UNALIGN_CTL(current, arg2);
			break;
		case PR_GET_UNALIGN:
			error = GET_UNALIGN_CTL(current, arg2);
			break;
		case PR_SET_FPEMU:
			error = SET_FPEMU_CTL(current, arg2);
			break;
		case PR_GET_FPEMU:
			error = GET_FPEMU_CTL(current, arg2);
			break;
		case PR_SET_FPEXC:
			error = SET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_FPEXC:
			error = GET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_TIMING:
			error = PR_TIMING_STATISTICAL;
			break;
		case PR_SET_TIMING:
			if (arg2 == PR_TIMING_STATISTICAL)
				error = 0;
			else
				error = -EINVAL;
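			/*
			 * Descriptive note: only the statistical timing mode
			 * is accepted here; any other value (e.g.
			 * PR_TIMING_TIMESTAMP) is rejected with -EINVAL above.
			 */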
			break;

		case PR_GET_KEEPCAPS:
			if (current->keep_capabilities)
				error = 1;
			break;
		case PR_SET_KEEPCAPS:
			if (arg2 != 0 && arg2 != 1) {
				error = -EINVAL;
				break;
			}
			current->keep_capabilities = arg2;
			break;
		case PR_SET_NAME: {
			struct task_struct *me = current;
			unsigned char ncomm[sizeof(me->comm)];

			ncomm[sizeof(me->comm)-1] = 0;
			if (strncpy_from_user(ncomm, (char __user *)arg2,
						sizeof(me->comm)-1) < 0)
				return -EFAULT;
			set_task_comm(me, ncomm);
			return 0;
		}
		case PR_GET_NAME: {
			struct task_struct *me = current;
			unsigned char tcomm[sizeof(me->comm)];

			get_task_comm(tcomm, me);
			if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
				return -EFAULT;
			return 0;
		}
		default:
			error = -EINVAL;
			break;
	}
	return error;
}
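
/*
 * Illustrative sketch (not part of this file): a driver elsewhere in the
 * tree would typically use the reboot notifier API defined above roughly
 * like this, with my_driver_reboot_event() and my_driver_reboot_nb being
 * hypothetical names used only for illustration:
 *
 *	static int my_driver_reboot_event(struct notifier_block *nb,
 *					  unsigned long event, void *unused)
 *	{
 *		// quiesce the hardware on SYS_RESTART/SYS_HALT/SYS_POWER_OFF
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_driver_reboot_nb = {
 *		.notifier_call	= my_driver_reboot_event,
 *		.priority	= 0,
 *	};
 *
 *	// at probe/init time:
 *	register_reboot_notifier(&my_driver_reboot_nb);
 *	// at remove/exit time:
 *	unregister_reboot_notifier(&my_driver_reboot_nb);
 */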