/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif

/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

#ifdef CONFIG_UID16
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
#endif

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. As such, this is needed on all architectures.
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
int cad_pid = 1;

/*
 * Notifier list for kernel code which wants to be called
 * at shutdown. This is used to stop any idling DMA operations
 * and the like.
 */

static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);

/*
 * Notifier chain core routines.  The exported routines below
 * are layered on top of these, with appropriate locking added.
 */
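/*
 * Illustrative note (editor's addition, hypothetical names): because
 * notifier_chain_register() below inserts in front of the first entry
 * whose priority is not greater, the chain stays sorted by descending
 * priority.  Registering these three blocks in any order
 *
 *	static struct notifier_block nb_a = { .notifier_call = f_a, .priority = 0  };
 *	static struct notifier_block nb_b = { .notifier_call = f_b, .priority = 10 };
 *	static struct notifier_block nb_c = { .notifier_call = f_c, .priority = 20 };
 *
 * yields the list nb_c -> nb_b -> nb_a, so notifier_call_chain()
 * invokes the highest-priority callback first.
 */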

static int notifier_chain_register(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if (n->priority > (*nl)->priority)
			break;
		nl = &((*nl)->next);
	}
	n->next = *nl;
	rcu_assign_pointer(*nl, n);
	return 0;
}

static int notifier_chain_unregister(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if ((*nl) == n) {
			rcu_assign_pointer(*nl, n->next);
			return 0;
		}
		nl = &((*nl)->next);
	}
	return -ENOENT;
}

static int __kprobes notifier_call_chain(struct notifier_block **nl,
		unsigned long val, void *v)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb;

	nb = rcu_dereference(*nl);
	while (nb) {
		ret = nb->notifier_call(nb, val, v);
		if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
			break;
		nb = rcu_dereference(nb->next);
	}
	return ret;
}

/*
 * Atomic notifier chain routines.  Registration and unregistration
 * use a spinlock, and call_chain is synchronized by RCU (no locks).
 */

/**
 * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to an atomic notifier chain.
 *
 * Currently always returns zero.
 */

int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_register(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);

/**
 * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from an atomic notifier chain.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_unregister(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	synchronize_rcu();
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
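
/*
 * Illustrative usage sketch (editor's addition, hypothetical names):
 * a typical client of an atomic notifier chain, using the register,
 * call and unregister routines in this block.
 *
 *	static int my_event(struct notifier_block *nb, unsigned long val, void *v)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call	= my_event,
 *	};
 *
 *	static ATOMIC_NOTIFIER_HEAD(my_chain);
 *
 *	atomic_notifier_chain_register(&my_chain, &my_nb);
 *	atomic_notifier_call_chain(&my_chain, 0, NULL);
 *	atomic_notifier_chain_unregister(&my_chain, &my_nb);
 */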

/**
 * atomic_notifier_call_chain - Call functions in an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in an atomic context, so they must not block.
 * This routine uses RCU to synchronize with changes to the chain.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
		unsigned long val, void *v)
{
	int ret;

	rcu_read_lock();
	ret = notifier_call_chain(&nh->head, val, v);
	rcu_read_unlock();
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);

/*
 * Blocking notifier chain routines.  All access to the chain is
 * synchronized by an rwsem.
 */

/**
 * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a blocking notifier chain.
 * Must be called in process context.
 *
 * Currently always returns zero.
 */

int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_register(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);

/**
 * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a blocking notifier chain.
 * Must be called from process context.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_unregister(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);

/**
 * blocking_notifier_call_chain - Call functions in a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in a process context, so they are allowed to block.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
		unsigned long val, void *v)
{
	int ret;

	down_read(&nh->rwsem);
	ret = notifier_call_chain(&nh->head, val, v);
	up_read(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);

/*
 * Raw notifier chain routines.  There is no protection;
 * the caller must provide it.  Use at your own risk!
 */

/**
 * raw_notifier_chain_register - Add notifier to a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Currently always returns zero.
 */

int raw_notifier_chain_register(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_register(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_register);

/**
 * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_unregister(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);

/**
 * raw_notifier_call_chain - Call functions in a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in an undefined context.
 * All locking must be provided by the caller.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then raw_notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int raw_notifier_call_chain(struct raw_notifier_head *nh,
		unsigned long val, void *v)
{
	return notifier_call_chain(&nh->head, val, v);
}

EXPORT_SYMBOL_GPL(raw_notifier_call_chain);

/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as blocking_notifier_chain_register
 * always returns zero.
 */

int register_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);

/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot
 * notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */

int unregister_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);
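
/*
 * Illustrative usage sketch (editor's addition, hypothetical names):
 * a driver that wants to quiesce its hardware before restart, halt or
 * power-off, using the reboot notifier interface above.
 *
 *	static int my_reboot_event(struct notifier_block *nb,
 *				   unsigned long code, void *cmd)
 *	{
 *		if (code == SYS_RESTART || code == SYS_HALT || code == SYS_POWER_OFF)
 *			my_stop_dma();		(hypothetical helper)
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_reboot_nb = {
 *		.notifier_call	= my_reboot_event,
 *	};
 *
 *	register_reboot_notifier(&my_reboot_nb);
 *	...
 *	unregister_reboot_notifier(&my_reboot_nb);
 */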

static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (p->uid != current->euid &&
	    p->euid != current->euid && !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

asmlinkage long sys_setpriority(int which, int who, int niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	int error = -EINVAL;

	if (which > 2 || which < 0)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (!who)
			who = current->pid;
		p = find_task_by_pid(who);
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (!who)
			who = process_group(current);
		do_each_task_pid(who, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_task_pid(who, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		user = current->user;
		if (!who)
			who = current->uid;
		else
			if ((who != current->uid) && !(user = find_user(who)))
				goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p)
			if (p->uid == who)
				error = set_one_prio(p, niceval, error);
		while_each_thread(g, p);
		if (who != current->uid)
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return error;
}

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
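
/*
 * Illustrative userspace sketch (editor's addition): because of the
 * offset-by-20 convention described above, the C library converts the
 * value back to a -20..19 nice value, and callers must test errno
 * rather than the sign of the result to detect failure:
 *
 *	errno = 0;
 *	prio = getpriority(PRIO_PROCESS, 0);
 *	if (prio == -1 && errno != 0)
 *		perror("getpriority");
 */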
asmlinkage long sys_getpriority(int which, int who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	long niceval, retval = -ESRCH;

	if (which > 2 || which < 0)
		return -EINVAL;

	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (!who)
			who = current->pid;
		p = find_task_by_pid(who);
		if (p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (!who)
			who = process_group(current);
		do_each_task_pid(who, PIDTYPE_PGID, p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		} while_each_task_pid(who, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		user = current->user;
		if (!who)
			who = current->uid;
		else
			if ((who != current->uid) && !(user = find_user(who)))
				goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p)
			if (p->uid == who) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
		while_each_thread(g, p);
		if (who != current->uid)
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);

	return retval;
}

/**
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks
 * reboot the system.  This is called when we know we are in
 * trouble so this is our best effort to reboot.  This is
 * safe to call in interrupt context.
 */
void emergency_restart(void)
{
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);

void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	device_shutdown();
}

/**
 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart
 *	or %NULL
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	if (!cmd) {
		printk(KERN_EMERG "Restarting system.\n");
	} else {
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	}
	printk(".\n");
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);

/**
 * kernel_kexec - reboot the system
 *
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
	struct kimage *image;
	image = xchg(&kexec_image, NULL);
	if (!image) {
		return;
	}
	kernel_restart_prepare(NULL);
	printk(KERN_EMERG "Starting new kernel\n");
	machine_shutdown();
	machine_kexec(image);
#endif
}
EXPORT_SYMBOL_GPL(kernel_kexec);

void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
	system_state = state;
	device_shutdown();
}

/**
 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	printk(KERN_EMERG "System halted.\n");
	machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);

/**
 * kernel_power_off - power_off the system
 *
 * Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	printk(KERN_EMERG "Power down.\n");
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);

/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user *arg)
{
	char buffer[256];

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	lock_kernel();
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			unlock_kernel();
			return -EFAULT;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

	case LINUX_REBOOT_CMD_KEXEC:
		kernel_kexec();
		unlock_kernel();
		return -EINVAL;

#ifdef CONFIG_SOFTWARE_SUSPEND
	case LINUX_REBOOT_CMD_SW_SUSPEND:
	{
		int ret = software_suspend();
		unlock_kernel();
		return ret;
	}
#endif

	default:
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();
	return 0;
}

static void deferred_cad(void *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad, NULL);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_proc(cad_pid, SIGINT, 1);
}

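/*
 * Illustrative userspace sketch (editor's addition): the magic numbers
 * checked in sys_reboot() above, as passed by a raw syscall; the
 * explicit sync() is needed because, as the comment above sys_reboot()
 * notes, the kernel does not sync for you:
 *
 *	sync();
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_POWER_OFF, NULL);
 */
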
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
	int old_rgid = current->gid;
	int old_egid = current->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;
	int retval;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (current->egid == rgid) ||
		    capable(CAP_SETGID))
			new_rgid = rgid;
		else
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (current->egid == egid) ||
		    (current->sgid == egid) ||
		    capable(CAP_SETGID))
			new_egid = egid;
		else {
			return -EPERM;
		}
	}
	if (new_egid != old_egid)
	{
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		current->sgid = new_egid;
	current->fsgid = new_egid;
	current->egid = new_egid;
	current->gid = new_rgid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
	int old_egid = current->egid;
	int retval;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	if (capable(CAP_SETGID))
	{
		if (old_egid != gid)
		{
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->gid = current->egid = current->sgid = current->fsgid = gid;
	}
	else if ((gid == current->gid) || (gid == current->sgid))
	{
		if (old_egid != gid)
		{
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = current->fsgid = gid;
	}
	else
		return -EPERM;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}

static int set_user(uid_t new_ruid, int dumpclear)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new_ruid);
	if (!new_user)
		return -EAGAIN;

	if (atomic_read(&new_user->processes) >=
			current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
			new_user != &root_user) {
		free_uid(new_user);
		return -EAGAIN;
	}

	switch_uid(new_user);

	if (dumpclear)
	{
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->uid = new_ruid;
	return 0;
}

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;
	int retval;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	new_ruid = old_ruid = current->uid;
	new_euid = old_euid = current->euid;
	old_suid = current->suid;

	if (ruid != (uid_t) -1) {
		new_ruid = ruid;
		if ((old_ruid != ruid) &&
		    (current->euid != ruid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (euid != (uid_t) -1) {
		new_euid = euid;
		if ((old_ruid != euid) &&
		    (current->euid != euid) &&
		    (current->suid != euid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
		return -EAGAIN;

	if (new_euid != old_euid)
	{
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		current->suid = current->euid;
	current->fsuid = current->euid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}


/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_IDS is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
	int old_euid = current->euid;
	int old_ruid, old_suid, new_ruid, new_suid;
	int retval;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	old_ruid = new_ruid = current->uid;
	old_suid = current->suid;
	new_suid = old_suid;

	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
			return -EAGAIN;
		new_suid = uid;
	} else if ((uid != current->uid) && (uid != new_suid))
		return -EPERM;

	if (old_euid != uid)
	{
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}


/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4BSD-compatible seteuid().
 */
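
/*
 * Illustrative sketch (editor's addition): the 4.4BSD-compatible
 * seteuid() mentioned above can be built on the setresuid() call below
 * by leaving the real and saved uids untouched:
 *
 *	int seteuid(uid_t euid)
 *	{
 *		return setresuid((uid_t) -1, euid, (uid_t) -1);
 *	}
 */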
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;
	int retval;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
			return -EPERM;
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
			return -EPERM;
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
			return -EPERM;
	}
	if (ruid != (uid_t) -1) {
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
			return -EAGAIN;
	}
	if (euid != (uid_t) -1) {
		if (euid != current->euid)
		{
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->euid = euid;
	}
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}

asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
	int retval;

	if (!(retval = put_user(current->uid, ruid)) &&
	    !(retval = put_user(current->euid, euid)))
		retval = put_user(current->suid, suid);

	return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	int retval;

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
		    (rgid != current->egid) && (rgid != current->sgid))
			return -EPERM;
		if ((egid != (gid_t) -1) && (egid != current->gid) &&
		    (egid != current->egid) && (egid != current->sgid))
			return -EPERM;
		if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
		    (sgid != current->egid) && (sgid != current->sgid))
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if (egid != current->egid)
		{
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = egid;
	}
	current->fsgid = current->egid;
	if (rgid != (gid_t) -1)
		current->gid = rgid;
	if (sgid != (gid_t) -1)
		current->sgid = sgid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}

asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
	int retval;

	if (!(retval = put_user(current->gid, rgid)) &&
	    !(retval = put_user(current->egid, egid)))
		retval = put_user(current->sgid, sgid);

	return retval;
}


/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
asmlinkage long sys_setfsuid(uid_t uid)
{
	int old_fsuid;

	old_fsuid = current->fsuid;
	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
		return old_fsuid;

	if (uid == current->uid || uid == current->euid ||
	    uid == current->suid || uid == current->fsuid ||
	    capable(CAP_SETUID))
	{
		if (uid != old_fsuid)
		{
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsuid = uid;
	}

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);

	return old_fsuid;
}

/*
 * Samma på svenska..
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
	int old_fsgid;

	old_fsgid = current->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
		return old_fsgid;

	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid ||
	    capable(CAP_SETGID))
	{
		if (gid != old_fsgid)
		{
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsgid = gid;
		key_fsgid_changed(current);
		proc_id_connector(current, PROC_EVENT_GID);
	}
	return old_fsgid;
}

asmlinkage long sys_times(struct tms __user *tbuf)
{
	/*
	 * In the SMP world we might just be unlucky and have one of
	 * the times increment as we use it.  Since the value is an
	 * atomically safe type this is just fine.  Conceptually it's
	 * as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct tms tmp;
		struct task_struct *tsk = current;
		struct task_struct *t;
		cputime_t utime, stime, cutime, cstime;

		spin_lock_irq(&tsk->sighand->siglock);
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
		t = tsk;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			t = next_thread(t);
		} while (t != tsk);

		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);

		tmp.tms_utime = cputime_to_clock_t(utime);
		tmp.tms_stime = cputime_to_clock_t(stime);
		tmp.tms_cutime = cputime_to_clock_t(cutime);
		tmp.tms_cstime = cputime_to_clock_t(cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}

/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */

asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	int err = -EINVAL;

	if (!pid)
		pid = group_leader->pid;
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_pid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (p->real_parent == group_leader) {
		err = -EPERM;
		if (p->signal->session != group_leader->signal->session)
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	if (pgid != pid) {
		struct task_struct *p;

		do_each_task_pid(pgid, PIDTYPE_PGID, p) {
			if (p->signal->session == group_leader->signal->session)
				goto ok_pgid;
		} while_each_task_pid(pgid, PIDTYPE_PGID, p);
		goto out;
	}

ok_pgid:
	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (process_group(p) != pgid) {
		detach_pid(p, PIDTYPE_PGID);
		p->signal->pgrp = pgid;
		attach_pid(p, PIDTYPE_PGID, pgid);
	}

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}

asmlinkage long sys_getpgid(pid_t pid)
{
	if (!pid) {
		return process_group(current);
	} else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getpgid(p);
			if (!retval)
				retval = process_group(p);
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}

#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
	/* SMP - assuming writes are word atomic this is fine */
	return process_group(current);
}

#endif

asmlinkage long sys_getsid(pid_t pid)
{
	if (!pid) {
		return current->signal->session;
	} else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getsid(p);
			if (!retval)
				retval = p->signal->session;
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}

asmlinkage long sys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *pid;
	int err = -EPERM;

	mutex_lock(&tty_mutex);
	write_lock_irq(&tasklist_lock);

	pid = find_pid(PIDTYPE_PGID, group_leader->pid);
	if (pid)
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(group_leader->pid, group_leader->pid);
	group_leader->signal->tty = NULL;
	group_leader->signal->tty_old_pgrp = 0;
	err = process_group(group_leader);
out:
	write_unlock_irq(&tasklist_lock);
	mutex_unlock(&tty_mutex);
	return err;
}

/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL) {
		group_info->blocks[0] = group_info->small_block;
	} else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	while (--i >= 0) {
		free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
	return NULL;
}

EXPORT_SYMBOL(groups_alloc);

void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}

EXPORT_SYMBOL(groups_free);

/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
		struct group_info *group_info)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist+off, group_info->blocks[i], len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}

/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
		gid_t __user *grouplist)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist+off, len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}

/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
		; /* nothing */
	stride /= 3;

	while (stride) {
		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int left = base;
			int right = left + stride;
			gid_t tmp = GROUP_AT(group_info, right);

			while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
				GROUP_AT(group_info, right) =
					GROUP_AT(group_info, left);
				right = left;
				left -= stride;
			}
			GROUP_AT(group_info, right) = tmp;
		}
		stride /= 3;
	}
}

/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
{
	unsigned int left, right;

	if (!group_info)
		return 0;

	left = 0;
	right = group_info->ngroups;
	while (left < right) {
		unsigned int mid = (left+right)/2;
		int cmp = grp - GROUP_AT(group_info, mid);
		if (cmp > 0)
			left = mid + 1;
		else if (cmp < 0)
			right = mid;
		else
			return 1;
	}
	return 0;
}
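
/*
 * Worked example (editor's addition): groups_sort() above uses the
 * h = 3*h + 1 gap sequence.  For ngroups = 30 the initial loop runs
 * through strides 1, 4, 13, 40, and the final /3 step starts the sort
 * at stride 13, so the passes use strides 13, 4 and 1.  groups_search()
 * depends on the resulting sorted order for its binary search.
 */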

/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
{
	int retval;
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);
	if (retval)
		return retval;

	groups_sort(group_info);
	get_group_info(group_info);

	task_lock(current);
	old_info = current->group_info;
	current->group_info = group_info;
	task_unlock(current);

	put_group_info(old_info);

	return 0;
}

EXPORT_SYMBOL(set_current_groups);

asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
	int i = 0;

	/*
	 * SMP: Nobody else can change our grouplist. Thus we are
	 *	safe.
	 */

	if (gidsetsize < 0)
		return -EINVAL;

	/* no need to grab task_lock here; it cannot change */
	i = current->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	return i;
}

/*
 * SMP: Our groups are copy-on-write. We can set them safely
 * without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}
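
/*
 * Illustrative userspace sketch (editor's addition): the usual pattern
 * is to ask for the count first (gidsetsize == 0 above), then fetch the
 * list; setgroups() is the privileged counterpart and needs CAP_SETGID:
 *
 *	int n = getgroups(0, NULL);
 *	gid_t *list = malloc(n * sizeof(gid_t));
 *	if (list && getgroups(n, list) == n)
 *		...inspect the supplementary groups...
 */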

/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->fsgid) {
		retval = groups_search(current->group_info, grp);
	}
	return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->egid) {
		retval = groups_search(current->group_info, grp);
	}
	return retval;
}

EXPORT_SYMBOL(in_egroup_p);

DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user *name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, &system_utsname, sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

asmlinkage long sys_sethostname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(system_utsname.nodename, tmp, len);
		system_utsname.nodename[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
	int i, errno;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	i = 1 + strlen(system_utsname.nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, system_utsname.nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(system_utsname.domainname, tmp, len);
		system_utsname.domainname[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}

asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	else {
		struct rlimit value;
		task_lock(current->group_leader);
		value = current->signal->rlim[resource];
		task_unlock(current->group_leader);
		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
	}
}
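
/*
 * Illustrative userspace sketch (editor's addition): raising the soft
 * limit up to the hard limit, which sys_setrlimit() below allows
 * without CAP_SYS_RESOURCE:
 *
 *	struct rlimit rl;
 *	getrlimit(RLIMIT_NOFILE, &rl);
 *	rl.rlim_cur = rl.rlim_max;
 *	setrlimit(RLIMIT_NOFILE, &rl);
 */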

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit. Needed for some apps.
 */

asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#endif

asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;
	unsigned long it_prof_secs;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
		return -EPERM;
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
		return -EPERM;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource != RLIMIT_CPU)
		goto out;

	/*
	 * RLIMIT_CPU handling.  Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (new_rlim.rlim_cur == RLIM_INFINITY)
		goto out;

	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
		unsigned long rlim_cur = new_rlim.rlim_cur;
		cputime_t cputime;

		if (rlim_cur == 0) {
			/*
			 * The caller is asking for an immediate RLIMIT_CPU
			 * expiry.  But we use the zero value to mean "it was
			 * never set".  So let's cheat and make it one second
			 * instead
			 */
			rlim_cur = 1;
		}
		cputime = secs_to_cputime(rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
	}
out:
	return 0;
}

/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * tasklist_lock locking optimisation:
 * If we are current and single threaded, we do not need to take the tasklist
 * lock or the siglock.  No one else can take our signal_struct away,
 * no one else can reap the children to update signal->c* counters, and
 * no one else can race with the signal-> fields.
 * If we do not take the tasklist_lock, the signal-> fields could be read
 * out of order while another thread was just exiting.  So we place a
 * read memory barrier when we avoid the lock.  On the writer side,
 * write memory barrier is implied in __exit_signal as __exit_signal releases
 * the siglock spinlock after updating the signal-> fields.
 *
 * We don't really need the siglock when we access the non c* fields
 * of the signal_struct (for RUSAGE_SELF) even in multithreaded
 * case, since we take the tasklist lock for read and the non c* signal->
 * fields are updated only in __exit_signal, which is called with
 * tasklist_lock taken for write, hence these two threads cannot execute
 * concurrently.
 *
 */

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;
	int need_lock = 0;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	if (p != current || !thread_group_empty(p))
		need_lock = 1;

	if (need_lock) {
		read_lock(&tasklist_lock);
		if (unlikely(!p->signal)) {
			read_unlock(&tasklist_lock);
			return;
		}
	} else
		/* See locking comments above */
		smp_rmb();

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		spin_lock_irqsave(&p->sighand->siglock, flags);
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);

		if (who == RUSAGE_CHILDREN)
			break;

	case RUSAGE_SELF:
		utime = cputime_add(utime, p->signal->utime);
		stime = cputime_add(stime, p->signal->stime);
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		t = p;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			r->ru_nvcsw += t->nvcsw;
			r->ru_nivcsw += t->nivcsw;
			r->ru_minflt += t->min_flt;
			r->ru_majflt += t->maj_flt;
			t = next_thread(t);
		} while (t != p);
		break;

	default:
		BUG();
	}

	if (need_lock)
		read_unlock(&tasklist_lock);
	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
}

int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;
	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
		return -EINVAL;
	return getrusage(current, who, ru);
}

asmlinkage long sys_umask(int mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}

asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
{
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error)
		return error;

	switch (option) {
		case PR_SET_PDEATHSIG:
			if (!valid_signal(arg2)) {
				error = -EINVAL;
				break;
			}
			current->pdeath_signal = arg2;
			break;
		case PR_GET_PDEATHSIG:
			error = put_user(current->pdeath_signal, (int __user *)arg2);
			break;
		case PR_GET_DUMPABLE:
			error = current->mm->dumpable;
			break;
		case PR_SET_DUMPABLE:
			if (arg2 < 0 || arg2 > 2) {
				error = -EINVAL;
				break;
			}
			current->mm->dumpable = arg2;
			break;

		case PR_SET_UNALIGN:
			error = SET_UNALIGN_CTL(current, arg2);
			break;
		case PR_GET_UNALIGN:
			error = GET_UNALIGN_CTL(current, arg2);
			break;
		case PR_SET_FPEMU:
			error = SET_FPEMU_CTL(current, arg2);
			break;
		case PR_GET_FPEMU:
			error = GET_FPEMU_CTL(current, arg2);
			break;
		case PR_SET_FPEXC:
			error = SET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_FPEXC:
			error = GET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_TIMING:
			error = PR_TIMING_STATISTICAL;
			break;
		case PR_SET_TIMING:
			if (arg2 == PR_TIMING_STATISTICAL)
				error = 0;
			else
				error = -EINVAL;
			break;

		case PR_GET_KEEPCAPS:
			if (current->keep_capabilities)
				error = 1;
			break;
		case PR_SET_KEEPCAPS:
			if (arg2 != 0 && arg2 != 1) {
				error = -EINVAL;
				break;
			}
			current->keep_capabilities = arg2;
			break;
		case PR_SET_NAME: {
			struct task_struct *me = current;
			unsigned char ncomm[sizeof(me->comm)];

			ncomm[sizeof(me->comm)-1] = 0;
			if (strncpy_from_user(ncomm, (char __user *)arg2,
						sizeof(me->comm)-1) < 0)
				return -EFAULT;
			set_task_comm(me, ncomm);
			return 0;
		}
		case PR_GET_NAME: {
			struct task_struct *me = current;
			unsigned char tcomm[sizeof(me->comm)];

			get_task_comm(tcomm, me);
			if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
				return -EFAULT;
			return 0;
		}
		default:
			error = -EINVAL;
			break;
	}
	return error;
}
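
/*
 * Illustrative userspace sketch (editor's addition): naming the calling
 * task via the PR_SET_NAME/PR_GET_NAME cases handled above (comm is 16
 * bytes including the terminating NUL):
 *
 *	char name[16];
 *	prctl(PR_SET_NAME, (unsigned long) "worker", 0, 0, 0);
 *	prctl(PR_GET_NAME, (unsigned long) name, 0, 0, 0);
 */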