/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);
}

static int get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = FD_ISSET(fd, fdt->close_on_exec);
	rcu_read_unlock();
	return res;
}

asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file, *tofree;
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = fcheck(oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	/*
	 * We need to detect attempts to do dup2() over an allocated but
	 * still unfinished descriptor.  NB: OpenBSD avoids that at the price
	 * of extra work in their equivalent of fget() - they insert the
	 * struct file immediately after grabbing a descriptor, mark it
	 * larval if more work (e.g. actual opening) is needed and make sure
	 * that fget() treats larval files as absent.  Potentially
	 * interesting, but while the extra work in fget() is trivial, the
	 * locking implications and the amount of surgery on open()-related
	 * paths in VFS are not.  FreeBSD fails with -EBADF in the same
	 * situation, NetBSD's "solution" deadlocks in rather amusing ways,
	 * AFAICS.  All of that is out of the scope of POSIX or SUS, since
	 * neither considers shared descriptor tables and this condition
	 * does not arise without those.
	 */
	err = -EBUSY;
	fdt = files_fdtable(files);
	tofree = fdt->fd[newfd];
	if (!tofree && FD_ISSET(newfd, fdt->open_fds))
		goto out_unlock;
	get_file(file);
	rcu_assign_pointer(fdt->fd[newfd], file);
	FD_SET(newfd, fdt->open_fds);
	if (flags & O_CLOEXEC)
		FD_SET(newfd, fdt->close_on_exec);
	else
		FD_CLR(newfd, fdt->close_on_exec);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return newfd;

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}
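/*
 * Usage sketch (hypothetical userspace code, for illustration only):
 * dup3() behaves like dup2() but can atomically mark the new descriptor
 * close-on-exec, and unlike dup2() it fails with EINVAL when
 * oldfd == newfd.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	// Duplicate oldfd onto newfd without leaking it across execve().
 *	int dup_cloexec(int oldfd, int newfd)
 *	{
 *		return dup3(oldfd, newfd, O_CLOEXEC);
 *	}
 */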
asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		rcu_read_lock();
		if (!fcheck_files(files, oldfd))
			oldfd = -EBADF;
		rcu_read_unlock();
		return oldfd;
	}
	return sys_dup3(oldfd, newfd, 0);
}

asmlinkage long sys_dup(unsigned int fildes)
{
	int ret = -EBADF;
	struct file *file = fget(fildes);

	if (file) {
		ret = get_unused_fd();
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)

static int setfl(int fd, struct file *filp, unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (!is_owner_or_cap(inode))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	if (arg & O_DIRECT) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
		    !filp->f_mapping->a_ops->direct_IO)
			return -EINVAL;
	}

	if (filp->f_op && filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	if ((arg ^ filp->f_flags) & FASYNC) {
		if (filp->f_op && filp->f_op->fasync) {
			error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
			if (error < 0)
				goto out;
		}
	}

	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
 out:
	return error;
}

static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
			uid_t uid, uid_t euid, int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		put_pid(filp->f_owner.pid);
		filp->f_owner.pid = get_pid(pid);
		filp->f_owner.pid_type = type;
		filp->f_owner.uid = uid;
		filp->f_owner.euid = euid;
	}
	write_unlock_irq(&filp->f_owner.lock);
}

int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
		int force)
{
	int err;

	err = security_file_set_fowner(filp);
	if (err)
		return err;

	f_modown(filp, pid, type, current->uid, current->euid, force);
	return 0;
}
EXPORT_SYMBOL(__f_setown);

int f_setown(struct file *filp, unsigned long arg, int force)
{
	enum pid_type type;
	struct pid *pid;
	int who = arg;
	int result;
	type = PIDTYPE_PID;
	if (who < 0) {
		type = PIDTYPE_PGID;
		who = -who;
	}
	rcu_read_lock();
	pid = find_vpid(who);
	result = __f_setown(filp, pid, type, force);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL(f_setown);
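/*
 * Usage sketch (hypothetical userspace code, for illustration only): the
 * sign convention handled by f_setown() above means F_SETOWN takes either
 * a pid (PIDTYPE_PID) or a negated process-group id (PIDTYPE_PGID):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	// Either deliver SIGIO/SIGURG for fd to this task:
 *	fcntl(fd, F_SETOWN, getpid());
 *	// ...or to its entire process group (note the minus sign):
 *	fcntl(fd, F_SETOWN, -getpgrp());
 */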
void f_delown(struct file *filp)
{
	f_modown(filp, NULL, PIDTYPE_PID, 0, 0, 1);
}

pid_t f_getown(struct file *filp)
{
	pid_t pid;
	read_lock(&filp->f_owner.lock);
	pid = pid_vnr(filp->f_owner.pid);
	if (filp->f_owner.pid_type == PIDTYPE_PGID)
		pid = -pid;
	read_unlock(&filp->f_owner.lock);
	return pid;
}

static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
	case F_DUPFD_CLOEXEC:
		if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
			break;
		err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
		if (err >= 0) {
			get_file(filp);
			fd_install(err, filp);
		}
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
	case F_GETLK:
		err = fcntl_getlk(filp, (struct flock __user *) arg);
		break;
	case F_SETLK:
	case F_SETLKW:
		err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = f_getown(filp);
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, arg, 1);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (!valid_signal(arg)) {
			break;
		}
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	default:
		break;
	}
	return err;
}
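/*
 * Usage sketch (hypothetical userspace code, for illustration only): per
 * the XXX comment above, a process-group owner comes back from F_GETOWN
 * as a negative value, which the syscall return path can misread as an
 * error, so a careful caller distinguishes the cases via errno.  The
 * handle_*() names below are hypothetical:
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *
 *	errno = 0;
 *	int owner = fcntl(fd, F_GETOWN);
 *	if (owner == -1 && errno != 0)
 *		handle_error();		// hypothetical helper
 *	else if (owner < 0)
 *		handle_pgrp(-owner);	// owner is a process group
 *	else
 *		handle_pid(owner);	// owner is a single process
 */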
asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file *filp;
	long err = -EBADF;

	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}

	err = do_fcntl(fd, cmd, arg, filp);

	fput(filp);
out:
	return err;
}

#if BITS_PER_LONG == 32
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct file *filp;
	long err;

	err = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	err = security_file_fcntl(filp, cmd, arg);
	if (err) {
		fput(filp);
		return err;
	}
	err = -EBADF;

	switch (cmd) {
	case F_GETLK64:
		err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
		break;
	case F_SETLK64:
	case F_SETLKW64:
		err = fcntl_setlk64(fd, filp, cmd,
				(struct flock64 __user *) arg);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, filp);
		break;
	}
	fput(filp);
out:
	return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static const long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};

static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	return (((fown->euid == 0) ||
		 (fown->euid == p->suid) || (fown->euid == p->uid) ||
		 (fown->uid == p->suid) || (fown->uid == p->uid)) &&
		!security_file_send_sigiotask(p, fown, sig));
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd,
			       int reason)
{
	if (!sigio_perm(p, fown, fown->signum))
		return;

	switch (fown->signum) {
		siginfo_t si;
	default:
		/* Queue an rt signal with the appropriate fd as its
		   value.  We use SI_SIGIO as the source, not
		   SI_KERNEL, since kernel signals always get
		   delivered even if we can't queue.  Failure to
		   queue in this case _should_ be reported; we fall
		   back to SIGIO in that case. --sct */
		si.si_signo = fown->signum;
		si.si_errno = 0;
		si.si_code  = reason;
		/* Make sure we are called with one of the POLL_*
		   reasons, otherwise we could leak kernel stack into
		   userspace.  */
		BUG_ON((reason & __SI_MASK) != __SI_POLL);
		if (reason - POLL_IN >= NSIGPOLL)
			si.si_band = ~0L;
		else
			si.si_band = band_table[reason - POLL_IN];
		si.si_fd = fd;
		if (!group_send_sig_info(fown->signum, &si, p))
			break;
		/* fall-through: fall back on the old plain SIGIO signal */
	case 0:
		group_send_sig_info(SIGIO, SEND_SIG_PRIV, p);
	}
}
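/*
 * Usage sketch (hypothetical userspace code, for illustration only): when
 * a realtime signal is chosen with F_SETSIG, the siginfo queued above
 * lets one SA_SIGINFO handler demultiplex many descriptors via si_fd and
 * the si_band bits from band_table[].  arm_async_io() is a hypothetical
 * helper:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static void io_handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		// si->si_fd is the descriptor, si->si_band the POLL* bits.
 *	}
 *
 *	int arm_async_io(int fd)
 *	{
 *		struct sigaction sa = { .sa_sigaction = io_handler,
 *					.sa_flags = SA_SIGINFO };
 *		sigaction(SIGRTMIN, &sa, NULL);
 *		fcntl(fd, F_SETSIG, SIGRTMIN);
 *		fcntl(fd, F_SETOWN, getpid());
 *		return fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 *	}
 */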
void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;

	read_lock(&fown->lock);
	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigio_to_task(p, fown, fd, band);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown)
{
	if (sigio_perm(p, fown, SIGURG))
		group_send_sig_info(SIGURG, SEND_SIG_PRIV, p);
}

int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int ret = 0;

	read_lock(&fown->lock);
	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigurg_to_task(p, fown);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}

static DEFINE_RWLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;

/*
 * fasync_helper() is used by some character device drivers (mainly mice)
 * to set up the fasync queue.  It returns negative on error, 0 if it made
 * no changes and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file *filp, int on, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	struct fasync_struct *new = NULL;
	int result = 0;

	if (on) {
		new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
		if (!new)
			return -ENOMEM;
	}
	write_lock_irq(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file == filp) {
			if (on) {
				fa->fa_fd = fd;
				kmem_cache_free(fasync_cache, new);
			} else {
				*fp = fa->fa_next;
				kmem_cache_free(fasync_cache, fa);
				result = 1;
			}
			goto out;
		}
	}

	if (on) {
		new->magic = FASYNC_MAGIC;
		new->fa_file = filp;
		new->fa_fd = fd;
		new->fa_next = *fapp;
		*fapp = new;
		result = 1;
	}
out:
	write_unlock_irq(&fasync_lock);
	return result;
}

EXPORT_SYMBOL(fasync_helper);
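/*
 * Usage sketch (hypothetical driver code, for illustration only): a
 * character driver typically calls fasync_helper() from its ->fasync()
 * method and keeps the list head in per-device state.  The example_*
 * names are hypothetical:
 *
 *	static struct fasync_struct *example_async;
 *
 *	static int example_fasync(int fd, struct file *filp, int on)
 *	{
 *		return fasync_helper(fd, filp, on, &example_async);
 *	}
 *
 *	static const struct file_operations example_fops = {
 *		.fasync	= example_fasync,
 *		// ... other methods ...
 *	};
 */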
void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct *fown;
		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		fown = &fa->fa_file->f_owner;
		/* Don't send SIGURG to processes which have not set a
		   queued signum: SIGURG has its own default signalling
		   mechanism. */
		if (!(sig == SIGURG && fown->signum == 0))
			send_sigio(fown, fa->fa_fd, band);
		fa = fa->fa_next;
	}
}

EXPORT_SYMBOL(__kill_fasync);

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		read_lock(&fasync_lock);
		/* reread *fp after obtaining the lock */
		__kill_fasync(*fp, sig, band);
		read_unlock(&fasync_lock);
	}
}
EXPORT_SYMBOL(kill_fasync);

static int __init fasync_init(void)
{
	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
	return 0;
}

module_init(fasync_init)
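/*
 * Usage sketch (hypothetical driver code, continuing the example above):
 * when new data arrives, the driver kicks every registered owner; the
 * unlocked *fp test in kill_fasync() keeps this cheap when nobody has
 * asked for async notification:
 *
 *	// e.g. from the driver's receive path or interrupt handler:
 *	kill_fasync(&example_async, SIGIO, POLL_IN);
 */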