/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif

/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

#ifdef CONFIG_UID16
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
#endif

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
int cad_pid = 1;

/*
 * Notifier list for kernel code which wants to be called
 * at shutdown. This is used to stop any idling DMA operations
 * and the like.
 */

static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);

/*
 * Notifier chain core routines.  The exported routines below
 * are layered on top of these, with appropriate locking added.
 */

static int notifier_chain_register(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if (n->priority > (*nl)->priority)
			break;
		nl = &((*nl)->next);
	}
	n->next = *nl;
	rcu_assign_pointer(*nl, n);
	return 0;
}

static int notifier_chain_unregister(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if ((*nl) == n) {
			rcu_assign_pointer(*nl, n->next);
			return 0;
		}
		nl = &((*nl)->next);
	}
	return -ENOENT;
}

static int __kprobes notifier_call_chain(struct notifier_block **nl,
		unsigned long val, void *v)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb;

	nb = rcu_dereference(*nl);
	while (nb) {
		ret = nb->notifier_call(nb, val, v);
		if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
			break;
		nb = rcu_dereference(nb->next);
	}
	return ret;
}

/*
 * Atomic notifier chain routines.  Registration and unregistration
 * use a spinlock, and call_chain is synchronized by RCU (no locks).
 */

/**
 * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to an atomic notifier chain.
 *
 * Currently always returns zero.
 */

int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_register(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);

/**
 * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from an atomic notifier chain.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_unregister(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	synchronize_rcu();
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
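
/*
 * Example (illustrative sketch, not part of this file): a subsystem that
 * wants callers notified from atomic context might use the chain API above
 * roughly as follows.  The names example_chain, example_event and
 * example_nb are hypothetical.
 *
 *	static ATOMIC_NOTIFIER_HEAD(example_chain);
 *
 *	static int example_event(struct notifier_block *nb,
 *				 unsigned long action, void *data)
 *	{
 *		// runs under rcu_read_lock(); must not sleep
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call	= example_event,
 *	};
 *
 *	atomic_notifier_chain_register(&example_chain, &example_nb);
 *	atomic_notifier_call_chain(&example_chain, 0, NULL);
 *	atomic_notifier_chain_unregister(&example_chain, &example_nb);
 */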

/**
 * atomic_notifier_call_chain - Call functions in an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in an atomic context, so they must not block.
 * This routine uses RCU to synchronize with changes to the chain.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
		unsigned long val, void *v)
{
	int ret;

	rcu_read_lock();
	ret = notifier_call_chain(&nh->head, val, v);
	rcu_read_unlock();
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);

/*
 * Blocking notifier chain routines.  All access to the chain is
 * synchronized by an rwsem.
 */

/**
 * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a blocking notifier chain.
 * Must be called in process context.
 *
 * Currently always returns zero.
 */

int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_register(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);

/**
 * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a blocking notifier chain.
 * Must be called from process context.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_unregister(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);

/**
 * blocking_notifier_call_chain - Call functions in a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in a process context, so they are allowed to block.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
		unsigned long val, void *v)
{
	int ret;

	down_read(&nh->rwsem);
	ret = notifier_call_chain(&nh->head, val, v);
	up_read(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);

/*
 * Raw notifier chain routines.  There is no protection;
 * the caller must provide it.  Use at your own risk!
 */

/**
 * raw_notifier_chain_register - Add notifier to a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Currently always returns zero.
 */

int raw_notifier_chain_register(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_register(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_register);

/**
 * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_unregister(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);

/**
 * raw_notifier_call_chain - Call functions in a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in an undefined context.
 * All locking must be provided by the caller.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then raw_notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int raw_notifier_call_chain(struct raw_notifier_head *nh,
		unsigned long val, void *v)
{
	return notifier_call_chain(&nh->head, val, v);
}

EXPORT_SYMBOL_GPL(raw_notifier_call_chain);

/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as blocking_notifier_chain_register
 * always returns zero.
 */

int register_reboot_notifier(struct notifier_block * nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);
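
/*
 * Example (illustrative sketch, not part of this file): a driver that needs
 * to quiesce hardware before restart/halt/poweroff might hook the reboot
 * chain like this.  The names example_reboot_event and example_reboot_nb
 * are hypothetical.
 *
 *	static int example_reboot_event(struct notifier_block *nb,
 *					unsigned long action, void *unused)
 *	{
 *		// action is SYS_RESTART, SYS_HALT or SYS_POWER_OFF
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_reboot_nb = {
 *		.notifier_call = example_reboot_event,
 *	};
 *
 *	register_reboot_notifier(&example_reboot_nb);
 *	...
 *	unregister_reboot_notifier(&example_reboot_nb);
 */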

/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot
 * notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */

int unregister_reboot_notifier(struct notifier_block * nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);

static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (p->uid != current->euid &&
		p->euid != current->euid && !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

asmlinkage long sys_setpriority(int which, int who, int niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	int error = -EINVAL;

	if (which > 2 || which < 0)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (!who)
			who = current->pid;
		p = find_task_by_pid(who);
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (!who)
			who = process_group(current);
		do_each_task_pid(who, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_task_pid(who, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		user = current->user;
		if (!who)
			who = current->uid;
		else
			if ((who != current->uid) && !(user = find_user(who)))
				goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p)
			if (p->uid == who)
				error = set_one_prio(p, niceval, error);
		while_each_thread(g, p);
		if (who != current->uid)
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return error;
}
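
/*
 * Example (illustrative userspace sketch, not part of this file): adjusting
 * and reading the nice value of the calling process through the libc
 * wrappers.  Note that glibc converts the kernel's offset return value
 * (see the comment below) back into a plain nice value, so errno must be
 * cleared to distinguish an error from a legitimate return of -1.
 *
 *	#include <sys/resource.h>
 *	#include <errno.h>
 *
 *	errno = 0;
 *	if (setpriority(PRIO_PROCESS, 0, 10) == 0) {
 *		int cur = getpriority(PRIO_PROCESS, 0);
 *		if (cur != -1 || errno == 0)
 *			; // cur is now 10 (niceval is clamped to -20..19)
 *	}
 */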

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	long niceval, retval = -ESRCH;

	if (which > 2 || which < 0)
		return -EINVAL;

	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (!who)
			who = current->pid;
		p = find_task_by_pid(who);
		if (p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (!who)
			who = process_group(current);
		do_each_task_pid(who, PIDTYPE_PGID, p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		} while_each_task_pid(who, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		user = current->user;
		if (!who)
			who = current->uid;
		else
			if ((who != current->uid) && !(user = find_user(who)))
				goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p)
			if (p->uid == who) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
		while_each_thread(g, p);
		if (who != current->uid)
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);

	return retval;
}

/**
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks
 * reboot the system.  This is called when we know we are in
 * trouble so this is our best effort to reboot.  This is
 * safe to call in interrupt context.
 */
void emergency_restart(void)
{
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);

void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	device_shutdown();
}

/**
 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart
 *	or %NULL
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	if (!cmd) {
		printk(KERN_EMERG "Restarting system.\n");
	} else {
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	}
	printk(".\n");
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);

/**
 * kernel_kexec - reboot the system
 *
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
	struct kimage *image;
	image = xchg(&kexec_image, NULL);
	if (!image) {
		return;
	}
	kernel_restart_prepare(NULL);
	printk(KERN_EMERG "Starting new kernel\n");
	machine_shutdown();
	machine_kexec(image);
#endif
}
EXPORT_SYMBOL_GPL(kernel_kexec);

void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
	system_state = state;
	device_shutdown();
}

/**
 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	printk(KERN_EMERG "System halted.\n");
	machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);

/**
 * kernel_power_off - power_off the system
 *
 * Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	printk(KERN_EMERG "Power down.\n");
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);

/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
{
	char buffer[256];

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	lock_kernel();
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			unlock_kernel();
			return -EFAULT;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

	case LINUX_REBOOT_CMD_KEXEC:
		kernel_kexec();
		unlock_kernel();
		return -EINVAL;

#ifdef CONFIG_SOFTWARE_SUSPEND
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		{
			int ret = software_suspend();
			unlock_kernel();
			return ret;
		}
#endif

	default:
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();
	return 0;
}

static void deferred_cad(void *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad, NULL);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_proc(cad_pid, SIGINT, 1);
}
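
/*
 * Example (illustrative userspace sketch, not part of this file): invoking
 * sys_reboot above through the raw syscall.  As the comment before
 * sys_reboot says, the caller must sync first, needs CAP_SYS_BOOT, and
 * both magic numbers must match or the call is rejected with -EINVAL.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/reboot.h>
 *
 *	sync();
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_RESTART, NULL);
 */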

/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
	int old_rgid = current->gid;
	int old_egid = current->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;
	int retval;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (current->egid==rgid) ||
		    capable(CAP_SETGID))
			new_rgid = rgid;
		else
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (current->egid == egid) ||
		    (current->sgid == egid) ||
		    capable(CAP_SETGID))
			new_egid = egid;
		else {
			return -EPERM;
		}
	}
	if (new_egid != old_egid)
	{
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		current->sgid = new_egid;
	current->fsgid = new_egid;
	current->egid = new_egid;
	current->gid = new_rgid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
	int old_egid = current->egid;
	int retval;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	if (capable(CAP_SETGID))
	{
		if(old_egid != gid)
		{
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->gid = current->egid = current->sgid = current->fsgid = gid;
	}
	else if ((gid == current->gid) || (gid == current->sgid))
	{
		if(old_egid != gid)
		{
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = current->fsgid = gid;
	}
	else
		return -EPERM;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}

static int set_user(uid_t new_ruid, int dumpclear)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new_ruid);
	if (!new_user)
		return -EAGAIN;

	if (atomic_read(&new_user->processes) >=
			current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
			new_user != &root_user) {
		free_uid(new_user);
		return -EAGAIN;
	}

	switch_uid(new_user);

	if(dumpclear)
	{
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->uid = new_ruid;
	return 0;
}
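
/*
 * Example (illustrative userspace sketch, not part of this file): a setgid
 * program permanently dropping its elevated group, relying on the
 * setregid() semantics described above (setting the real gid also resets
 * the saved gid to the new effective gid).
 *
 *	#include <unistd.h>
 *	#include <stdlib.h>
 *
 *	gid_t rgid = getgid();		// the invoking user's real gid
 *	if (setregid(rgid, rgid) != 0)
 *		abort();		// refuse to run with the elevated gid
 */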

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;
	int retval;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	new_ruid = old_ruid = current->uid;
	new_euid = old_euid = current->euid;
	old_suid = current->suid;

	if (ruid != (uid_t) -1) {
		new_ruid = ruid;
		if ((old_ruid != ruid) &&
		    (current->euid != ruid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (euid != (uid_t) -1) {
		new_euid = euid;
		if ((old_ruid != euid) &&
		    (current->euid != euid) &&
		    (current->suid != euid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
		return -EAGAIN;

	if (new_euid != old_euid)
	{
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		current->suid = current->euid;
	current->fsuid = current->euid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}

/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
	int old_euid = current->euid;
	int old_ruid, old_suid, new_ruid, new_suid;
	int retval;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	old_ruid = new_ruid = current->uid;
	old_suid = current->suid;
	new_suid = old_suid;

	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
			return -EAGAIN;
		new_suid = uid;
	} else if ((uid != current->uid) && (uid != new_suid))
		return -EPERM;

	if (old_euid != uid)
	{
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}

/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;
	int retval;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
			return -EPERM;
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
			return -EPERM;
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
			return -EPERM;
	}
	if (ruid != (uid_t) -1) {
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
			return -EAGAIN;
	}
	if (euid != (uid_t) -1) {
		if (euid != current->euid)
		{
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->euid = euid;
	}
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}

asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
	int retval;

	if (!(retval = put_user(current->uid, ruid)) &&
	    !(retval = put_user(current->euid, euid)))
		retval = put_user(current->suid, suid);

	return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	int retval;

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
		    (rgid != current->egid) && (rgid != current->sgid))
			return -EPERM;
		if ((egid != (gid_t) -1) && (egid != current->gid) &&
		    (egid != current->egid) && (egid != current->sgid))
			return -EPERM;
		if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
		    (sgid != current->egid) && (sgid != current->sgid))
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if (egid != current->egid)
		{
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = egid;
	}
	current->fsgid = current->egid;
	if (rgid != (gid_t) -1)
		current->gid = rgid;
	if (sgid != (gid_t) -1)
		current->sgid = sgid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}

asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
	int retval;

	if (!(retval = put_user(current->gid, rgid)) &&
	    !(retval = put_user(current->egid, egid)))
		retval = put_user(current->sgid, sgid);

	return retval;
}
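
/*
 * Example (illustrative userspace sketch, not part of this file): a setuid
 * root program permanently giving up root by setting all three uids via
 * setresuid(), then verifying the drop with getresuid().
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *	#include <stdlib.h>
 *
 *	uid_t ruid, euid, suid, user = getuid();
 *	if (setresuid(user, user, user) != 0)
 *		abort();
 *	if (getresuid(&ruid, &euid, &suid) != 0 ||
 *	    ruid != user || euid != user || suid != user)
 *		abort();		// the drop did not take effect
 */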

/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
asmlinkage long sys_setfsuid(uid_t uid)
{
	int old_fsuid;

	old_fsuid = current->fsuid;
	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
		return old_fsuid;

	if (uid == current->uid || uid == current->euid ||
	    uid == current->suid || uid == current->fsuid ||
	    capable(CAP_SETUID))
	{
		if (uid != old_fsuid)
		{
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsuid = uid;
	}

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);

	return old_fsuid;
}

/*
 * Samma på svenska..
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
	int old_fsgid;

	old_fsgid = current->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
		return old_fsgid;

	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid ||
	    capable(CAP_SETGID))
	{
		if (gid != old_fsgid)
		{
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsgid = gid;
		key_fsgid_changed(current);
		proc_id_connector(current, PROC_EVENT_GID);
	}
	return old_fsgid;
}

asmlinkage long sys_times(struct tms __user * tbuf)
{
	/*
	 * In the SMP world we might just be unlucky and have one of
	 * the times increment as we use it. Since the value is an
	 * atomically safe type this is just fine. Conceptually it's
	 * as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct tms tmp;
		struct task_struct *tsk = current;
		struct task_struct *t;
		cputime_t utime, stime, cutime, cstime;

		spin_lock_irq(&tsk->sighand->siglock);
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
		t = tsk;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			t = next_thread(t);
		} while (t != tsk);

		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);

		tmp.tms_utime = cputime_to_clock_t(utime);
		tmp.tms_stime = cputime_to_clock_t(stime);
		tmp.tms_cutime = cputime_to_clock_t(cutime);
		tmp.tms_cstime = cputime_to_clock_t(cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
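
/*
 * Example (illustrative userspace sketch, not part of this file): reading
 * the values sys_times() fills in.  Both the tms fields and the return
 * value are in clock ticks, so they are converted to seconds with
 * sysconf(_SC_CLK_TCK).
 *
 *	#include <sys/times.h>
 *	#include <unistd.h>
 *
 *	struct tms t;
 *	clock_t since_boot = times(&t);	// return value is also in ticks
 *	long hz = sysconf(_SC_CLK_TCK);
 *	double user_secs = (double)t.tms_utime / hz;
 *	double sys_secs  = (double)t.tms_stime / hz;
 */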

/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */

asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	int err = -EINVAL;

	if (!pid)
		pid = group_leader->pid;
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_pid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (p->real_parent == group_leader) {
		err = -EPERM;
		if (p->signal->session != group_leader->signal->session)
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	if (pgid != pid) {
		struct task_struct *p;

		do_each_task_pid(pgid, PIDTYPE_PGID, p) {
			if (p->signal->session == group_leader->signal->session)
				goto ok_pgid;
		} while_each_task_pid(pgid, PIDTYPE_PGID, p);
		goto out;
	}

ok_pgid:
	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (process_group(p) != pgid) {
		detach_pid(p, PIDTYPE_PGID);
		p->signal->pgrp = pgid;
		attach_pid(p, PIDTYPE_PGID, pgid);
	}

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}

asmlinkage long sys_getpgid(pid_t pid)
{
	if (!pid) {
		return process_group(current);
	} else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getpgid(p);
			if (!retval)
				retval = process_group(p);
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}

#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
	/* SMP - assuming writes are word atomic this is fine */
	return process_group(current);
}

#endif

asmlinkage long sys_getsid(pid_t pid)
{
	if (!pid) {
		return current->signal->session;
	} else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if(p) {
			retval = security_task_getsid(p);
			if (!retval)
				retval = p->signal->session;
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}

asmlinkage long sys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	pid_t session;
	int err = -EPERM;

	mutex_lock(&tty_mutex);
	write_lock_irq(&tasklist_lock);

	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	session = group_leader->pid;
	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 *
	 * Don't check if session id == 1 because kernel threads use this
	 * session id and so the check will always fail and make it so
	 * init cannot successfully call setsid.
	 */
	if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(session, session);
	group_leader->signal->tty = NULL;
	group_leader->signal->tty_old_pgrp = 0;
	err = process_group(group_leader);
out:
	write_unlock_irq(&tasklist_lock);
	mutex_unlock(&tty_mutex);
	return err;
}

/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL) {
		group_info->blocks[0] = group_info->small_block;
	} else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	while (--i >= 0) {
		free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
	return NULL;
}

EXPORT_SYMBOL(groups_alloc);

void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}

EXPORT_SYMBOL(groups_free);

/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
		struct group_info *group_info)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist+off, group_info->blocks[i], len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}

/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
		gid_t __user *grouplist)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist+off, len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}

/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
		; /* nothing */
	stride /= 3;

	while (stride) {
		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int left = base;
			int right = left + stride;
			gid_t tmp = GROUP_AT(group_info, right);

			while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
				GROUP_AT(group_info, right) =
					GROUP_AT(group_info, left);
				right = left;
				left -= stride;
			}
			GROUP_AT(group_info, right) = tmp;
		}
		stride /= 3;
	}
}

/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
{
	unsigned int left, right;

	if (!group_info)
		return 0;

	left = 0;
	right = group_info->ngroups;
	while (left < right) {
		unsigned int mid = (left+right)/2;
		int cmp = grp - GROUP_AT(group_info, mid);
		if (cmp > 0)
			left = mid + 1;
		else if (cmp < 0)
			right = mid;
		else
			return 1;
	}
	return 0;
}

/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
{
	int retval;
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);
	if (retval)
		return retval;

	groups_sort(group_info);
	get_group_info(group_info);

	task_lock(current);
	old_info = current->group_info;
	current->group_info = group_info;
	task_unlock(current);

	put_group_info(old_info);

	return 0;
}

EXPORT_SYMBOL(set_current_groups);

asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
	int i = 0;

	/*
	 * SMP: Nobody else can change our grouplist. Thus we are
	 * safe.
	 */

	if (gidsetsize < 0)
		return -EINVAL;

	/* no need to grab task_lock here; it cannot change */
	i = current->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	return i;
}

/*
 * SMP: Our groups are copy-on-write. We can set them safely
 * without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}
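
/*
 * Example (illustrative userspace sketch, not part of this file): querying
 * the supplementary group list as filled in by sys_getgroups().  Calling
 * getgroups(0, NULL) first returns the count so the buffer can be sized.
 *
 *	#include <unistd.h>
 *	#include <stdlib.h>
 *
 *	int n = getgroups(0, NULL);
 *	if (n > 0) {
 *		gid_t *list = calloc(n, sizeof(gid_t));
 *		if (list && getgroups(n, list) == n)
 *			; // list[0..n-1] holds the supplementary groups
 *		free(list);
 *	}
 */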

/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->fsgid) {
		retval = groups_search(current->group_info, grp);
	}
	return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->egid) {
		retval = groups_search(current->group_info, grp);
	}
	return retval;
}

EXPORT_SYMBOL(in_egroup_p);

DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user * name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name,&system_utsname,sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

asmlinkage long sys_sethostname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(system_utsname.nodename, tmp, len);
		system_utsname.nodename[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
	int i, errno;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	i = 1 + strlen(system_utsname.nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, system_utsname.nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(system_utsname.domainname, tmp, len);
		system_utsname.domainname[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}

asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	else {
		struct rlimit value;
		task_lock(current->group_leader);
		value = current->signal->rlim[resource];
		task_unlock(current->group_leader);
		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
	}
}
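
/*
 * Example (illustrative userspace sketch, not part of this file): raising
 * the soft limit on open files up to the hard limit.  Only increasing the
 * hard limit needs CAP_SYS_RESOURCE (see sys_setrlimit below); raising the
 * soft limit within the hard limit, or lowering either limit, does not.
 *
 *	#include <sys/resource.h>
 *
 *	struct rlimit rl;
 *	if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
 *		rl.rlim_cur = rl.rlim_max;
 *		setrlimit(RLIMIT_NOFILE, &rl);
 *	}
 */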

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit. Needed for some apps.
 */

asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if(x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if(x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;
}

#endif

asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;
	unsigned long it_prof_secs;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
		return -EPERM;
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
		return -EPERM;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource != RLIMIT_CPU)
		goto out;

	/*
	 * RLIMIT_CPU handling.   Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (new_rlim.rlim_cur == RLIM_INFINITY)
		goto out;

	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
		unsigned long rlim_cur = new_rlim.rlim_cur;
		cputime_t cputime;

		if (rlim_cur == 0) {
			/*
			 * The caller is asking for an immediate RLIMIT_CPU
			 * expiry.  But we use the zero value to mean "it was
			 * never set".  So let's cheat and make it one second
			 * instead
			 */
			rlim_cur = 1;
		}
		cputime = secs_to_cputime(rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
	}
out:
	return 0;
}

/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * tasklist_lock locking optimisation:
 * If we are current and single threaded, we do not need to take the tasklist
 * lock or the siglock.  No one else can take our signal_struct away,
 * no one else can reap the children to update signal->c* counters, and
 * no one else can race with the signal-> fields.
 * If we do not take the tasklist_lock, the signal-> fields could be read
 * out of order while another thread was just exiting. So we place a
 * read memory barrier when we avoid the lock.  On the writer side,
 * write memory barrier is implied in __exit_signal as __exit_signal releases
 * the siglock spinlock after updating the signal-> fields.
 *
 * We don't really need the siglock when we access the non c* fields
 * of the signal_struct (for RUSAGE_SELF) even in multithreaded
 * case, since we take the tasklist lock for read and the non c* signal->
 * fields are updated only in __exit_signal, which is called with
 * tasklist_lock taken for write, hence these two threads cannot execute
 * concurrently.
 *
 */

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;
	int need_lock = 0;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	if (p != current || !thread_group_empty(p))
		need_lock = 1;

	if (need_lock) {
		read_lock(&tasklist_lock);
		if (unlikely(!p->signal)) {
			read_unlock(&tasklist_lock);
			return;
		}
	} else
		/* See locking comments above */
		smp_rmb();

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		spin_lock_irqsave(&p->sighand->siglock, flags);
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);

		if (who == RUSAGE_CHILDREN)
			break;

	case RUSAGE_SELF:
		utime = cputime_add(utime, p->signal->utime);
		stime = cputime_add(stime, p->signal->stime);
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		t = p;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			r->ru_nvcsw += t->nvcsw;
			r->ru_nivcsw += t->nivcsw;
			r->ru_minflt += t->min_flt;
			r->ru_majflt += t->maj_flt;
			t = next_thread(t);
		} while (t != p);
		break;

	default:
		BUG();
	}

	if (need_lock)
		read_unlock(&tasklist_lock);
	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
}

int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;
	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
		return -EINVAL;
	return getrusage(current, who, ru);
}

asmlinkage long sys_umask(int mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}

asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
{
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error)
		return error;

	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		current->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(current->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = current->mm->dumpable;
		break;
	case PR_SET_DUMPABLE:
		if (arg2 < 0 || arg2 > 2) {
			error = -EINVAL;
			break;
		}
		current->mm->dumpable = arg2;
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(current, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(current, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(current, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(current, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(current, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(current, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 == PR_TIMING_STATISTICAL)
			error = 0;
		else
			error = -EINVAL;
		break;

	case PR_GET_KEEPCAPS:
		if (current->keep_capabilities)
			error = 1;
		break;
	case PR_SET_KEEPCAPS:
		if (arg2 != 0 && arg2 != 1) {
			error = -EINVAL;
			break;
		}
		current->keep_capabilities = arg2;
		break;
	case PR_SET_NAME: {
		struct task_struct *me = current;
		unsigned char ncomm[sizeof(me->comm)];

		ncomm[sizeof(me->comm)-1] = 0;
		if (strncpy_from_user(ncomm, (char __user *)arg2,
					sizeof(me->comm)-1) < 0)
			return -EFAULT;
		set_task_comm(me, ncomm);
		return 0;
	}
	case PR_GET_NAME: {
		struct task_struct *me = current;
		unsigned char tcomm[sizeof(me->comm)];

		get_task_comm(tcomm, me);
		if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
			return -EFAULT;
		return 0;
	}
	default:
		error = -EINVAL;
		break;
	}
	return error;
}
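
/*
 * Example (illustrative userspace sketch, not part of this file): using the
 * PR_SET_NAME/PR_GET_NAME options handled above.  The task comm buffer is
 * sizeof(current->comm) bytes (16), so names are truncated to 15 characters
 * plus the terminating NUL.
 *
 *	#include <sys/prctl.h>
 *
 *	char name[16];
 *	prctl(PR_SET_NAME, "worker-thread", 0, 0, 0);
 *	prctl(PR_GET_NAME, name, 0, 0, 0);
 */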