1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * linux/fs/file.c 4 * 5 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes 6 * 7 * Manage the dynamic fd arrays in the process files_struct. 8 */ 9 10 #include <linux/syscalls.h> 11 #include <linux/export.h> 12 #include <linux/fs.h> 13 #include <linux/kernel.h> 14 #include <linux/mm.h> 15 #include <linux/sched/signal.h> 16 #include <linux/slab.h> 17 #include <linux/file.h> 18 #include <linux/fdtable.h> 19 #include <linux/bitops.h> 20 #include <linux/spinlock.h> 21 #include <linux/rcupdate.h> 22 #include <linux/close_range.h> 23 #include <linux/file_ref.h> 24 #include <net/sock.h> 25 #include <linux/init_task.h> 26 27 #include "internal.h" 28 29 /** 30 * __file_ref_put - Slowpath of file_ref_put() 31 * @ref: Pointer to the reference count 32 * @cnt: Current reference count 33 * 34 * Invoked when the reference count is outside of the valid zone. 35 * 36 * Return: 37 * True if this was the last reference with no future references 38 * possible. This signals the caller that it can safely schedule the 39 * object, which is protected by the reference counter, for 40 * deconstruction. 41 * 42 * False if there are still active references or the put() raced 43 * with a concurrent get()/put() pair. Caller is not allowed to 44 * deconstruct the protected object. 45 */ 46 bool __file_ref_put(file_ref_t *ref, unsigned long cnt) 47 { 48 /* Did this drop the last reference? */ 49 if (likely(cnt == FILE_REF_NOREF)) { 50 /* 51 * Carefully try to set the reference count to FILE_REF_DEAD. 52 * 53 * This can fail if a concurrent get() operation has 54 * elevated it again or the corresponding put() even marked 55 * it dead already. Both are valid situations and do not 56 * require a retry. If this fails the caller is not 57 * allowed to deconstruct the object. 58 */ 59 if (!atomic_long_try_cmpxchg_release(&ref->refcnt, &cnt, FILE_REF_DEAD)) 60 return false; 61 62 /* 63 * The caller can safely schedule the object for 64 * deconstruction. Provide acquire ordering. 65 */ 66 smp_acquire__after_ctrl_dep(); 67 return true; 68 } 69 70 /* 71 * If the reference count was already in the dead zone, then this 72 * put() operation is imbalanced. Warn, put the reference count back to 73 * DEAD and tell the caller to not deconstruct the object. 74 */ 75 if (WARN_ONCE(cnt >= FILE_REF_RELEASED, "imbalanced put on file reference count")) { 76 atomic_long_set(&ref->refcnt, FILE_REF_DEAD); 77 return false; 78 } 79 80 /* 81 * This is a put() operation on a saturated refcount. Restore the 82 * mean saturation value and tell the caller to not deconstruct the 83 * object. 84 */ 85 if (cnt > FILE_REF_MAXREF) 86 atomic_long_set(&ref->refcnt, FILE_REF_SATURATED); 87 return false; 88 } 89 EXPORT_SYMBOL_GPL(__file_ref_put); 90 91 unsigned int sysctl_nr_open __read_mostly = 1024*1024; 92 unsigned int sysctl_nr_open_min = BITS_PER_LONG; 93 /* our min() is unusable in constant expressions ;-/ */ 94 #define __const_min(x, y) ((x) < (y) ? (x) : (y)) 95 unsigned int sysctl_nr_open_max = 96 __const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG; 97 98 static void __free_fdtable(struct fdtable *fdt) 99 { 100 kvfree(fdt->fd); 101 kvfree(fdt->open_fds); 102 kfree(fdt); 103 } 104 105 static void free_fdtable_rcu(struct rcu_head *rcu) 106 { 107 __free_fdtable(container_of(rcu, struct fdtable, rcu)); 108 } 109 110 #define BITBIT_NR(nr) BITS_TO_LONGS(BITS_TO_LONGS(nr)) 111 #define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long)) 112 113 #define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds 114 /* 115 * Copy 'count' fd bits from the old table to the new table and clear the extra 116 * space if any. This does not copy the file pointers. Called with the files 117 * spinlock held for write. 118 */ 119 static inline void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt, 120 unsigned int copy_words) 121 { 122 unsigned int nwords = fdt_words(nfdt); 123 124 bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds, 125 copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG); 126 bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec, 127 copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG); 128 bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits, 129 copy_words, nwords); 130 } 131 132 /* 133 * Copy all file descriptors from the old table to the new, expanded table and 134 * clear the extra space. Called with the files spinlock held for write. 135 */ 136 static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt) 137 { 138 size_t cpy, set; 139 140 BUG_ON(nfdt->max_fds < ofdt->max_fds); 141 142 cpy = ofdt->max_fds * sizeof(struct file *); 143 set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *); 144 memcpy(nfdt->fd, ofdt->fd, cpy); 145 memset((char *)nfdt->fd + cpy, 0, set); 146 147 copy_fd_bitmaps(nfdt, ofdt, fdt_words(ofdt)); 148 } 149 150 /* 151 * Note how the fdtable bitmap allocations very much have to be a multiple of 152 * BITS_PER_LONG. This is not only because we walk those things in chunks of 153 * 'unsigned long' in some places, but simply because that is how the Linux 154 * kernel bitmaps are defined to work: they are not "bits in an array of bytes", 155 * they are very much "bits in an array of unsigned long". 156 */ 157 static struct fdtable *alloc_fdtable(unsigned int slots_wanted) 158 { 159 struct fdtable *fdt; 160 unsigned int nr; 161 void *data; 162 163 /* 164 * Figure out how many fds we actually want to support in this fdtable. 165 * Allocation steps are keyed to the size of the fdarray, since it 166 * grows far faster than any of the other dynamic data. We try to fit 167 * the fdarray into comfortable page-tuned chunks: starting at 1024B 168 * and growing in powers of two from there on. Since we called only 169 * with slots_wanted > BITS_PER_LONG (embedded instance in files->fdtab 170 * already gives BITS_PER_LONG slots), the above boils down to 171 * 1. use the smallest power of two large enough to give us that many 172 * slots. 173 * 2. on 32bit skip 64 and 128 - the minimal capacity we want there is 174 * 256 slots (i.e. 1Kb fd array). 175 * 3. on 64bit don't skip anything, 1Kb fd array means 128 slots there 176 * and we are never going to be asked for 64 or less. 177 */ 178 if (IS_ENABLED(CONFIG_32BIT) && slots_wanted < 256) 179 nr = 256; 180 else 181 nr = roundup_pow_of_two(slots_wanted); 182 /* 183 * Note that this can drive nr *below* what we had passed if sysctl_nr_open 184 * had been set lower between the check in expand_files() and here. 185 * 186 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise 187 * bitmaps handling below becomes unpleasant, to put it mildly... 188 */ 189 if (unlikely(nr > sysctl_nr_open)) { 190 nr = round_down(sysctl_nr_open, BITS_PER_LONG); 191 if (nr < slots_wanted) 192 return ERR_PTR(-EMFILE); 193 } 194 195 fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT); 196 if (!fdt) 197 goto out; 198 fdt->max_fds = nr; 199 data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT); 200 if (!data) 201 goto out_fdt; 202 fdt->fd = data; 203 204 data = kvmalloc(max_t(size_t, 205 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES), 206 GFP_KERNEL_ACCOUNT); 207 if (!data) 208 goto out_arr; 209 fdt->open_fds = data; 210 data += nr / BITS_PER_BYTE; 211 fdt->close_on_exec = data; 212 data += nr / BITS_PER_BYTE; 213 fdt->full_fds_bits = data; 214 215 return fdt; 216 217 out_arr: 218 kvfree(fdt->fd); 219 out_fdt: 220 kfree(fdt); 221 out: 222 return ERR_PTR(-ENOMEM); 223 } 224 225 /* 226 * Expand the file descriptor table. 227 * This function will allocate a new fdtable and both fd array and fdset, of 228 * the given size. 229 * Return <0 error code on error; 0 on successful completion. 230 * The files->file_lock should be held on entry, and will be held on exit. 231 */ 232 static int expand_fdtable(struct files_struct *files, unsigned int nr) 233 __releases(files->file_lock) 234 __acquires(files->file_lock) 235 { 236 struct fdtable *new_fdt, *cur_fdt; 237 238 spin_unlock(&files->file_lock); 239 new_fdt = alloc_fdtable(nr + 1); 240 241 /* make sure all fd_install() have seen resize_in_progress 242 * or have finished their rcu_read_lock_sched() section. 243 */ 244 if (atomic_read(&files->count) > 1) 245 synchronize_rcu(); 246 247 spin_lock(&files->file_lock); 248 if (IS_ERR(new_fdt)) 249 return PTR_ERR(new_fdt); 250 cur_fdt = files_fdtable(files); 251 BUG_ON(nr < cur_fdt->max_fds); 252 copy_fdtable(new_fdt, cur_fdt); 253 rcu_assign_pointer(files->fdt, new_fdt); 254 if (cur_fdt != &files->fdtab) 255 call_rcu(&cur_fdt->rcu, free_fdtable_rcu); 256 /* coupled with smp_rmb() in fd_install() */ 257 smp_wmb(); 258 return 0; 259 } 260 261 /* 262 * Expand files. 263 * This function will expand the file structures, if the requested size exceeds 264 * the current capacity and there is room for expansion. 265 * Return <0 error code on error; 0 on success. 266 * The files->file_lock should be held on entry, and will be held on exit. 267 */ 268 static int expand_files(struct files_struct *files, unsigned int nr) 269 __releases(files->file_lock) 270 __acquires(files->file_lock) 271 { 272 struct fdtable *fdt; 273 int error; 274 275 repeat: 276 fdt = files_fdtable(files); 277 278 /* Do we need to expand? */ 279 if (nr < fdt->max_fds) 280 return 0; 281 282 /* Can we expand? */ 283 if (nr >= sysctl_nr_open) 284 return -EMFILE; 285 286 if (unlikely(files->resize_in_progress)) { 287 spin_unlock(&files->file_lock); 288 wait_event(files->resize_wait, !files->resize_in_progress); 289 spin_lock(&files->file_lock); 290 goto repeat; 291 } 292 293 /* All good, so we try */ 294 files->resize_in_progress = true; 295 error = expand_fdtable(files, nr); 296 files->resize_in_progress = false; 297 298 wake_up_all(&files->resize_wait); 299 return error; 300 } 301 302 static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt, 303 bool set) 304 { 305 if (set) { 306 __set_bit(fd, fdt->close_on_exec); 307 } else { 308 if (test_bit(fd, fdt->close_on_exec)) 309 __clear_bit(fd, fdt->close_on_exec); 310 } 311 } 312 313 static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt, bool set) 314 { 315 __set_bit(fd, fdt->open_fds); 316 __set_close_on_exec(fd, fdt, set); 317 fd /= BITS_PER_LONG; 318 if (!~fdt->open_fds[fd]) 319 __set_bit(fd, fdt->full_fds_bits); 320 } 321 322 static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt) 323 { 324 __clear_bit(fd, fdt->open_fds); 325 fd /= BITS_PER_LONG; 326 if (test_bit(fd, fdt->full_fds_bits)) 327 __clear_bit(fd, fdt->full_fds_bits); 328 } 329 330 static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt) 331 { 332 return test_bit(fd, fdt->open_fds); 333 } 334 335 /* 336 * Note that a sane fdtable size always has to be a multiple of 337 * BITS_PER_LONG, since we have bitmaps that are sized by this. 338 * 339 * punch_hole is optional - when close_range() is asked to unshare 340 * and close, we don't need to copy descriptors in that range, so 341 * a smaller cloned descriptor table might suffice if the last 342 * currently opened descriptor falls into that range. 343 */ 344 static unsigned int sane_fdtable_size(struct fdtable *fdt, struct fd_range *punch_hole) 345 { 346 unsigned int last = find_last_bit(fdt->open_fds, fdt->max_fds); 347 348 if (last == fdt->max_fds) 349 return NR_OPEN_DEFAULT; 350 if (punch_hole && punch_hole->to >= last && punch_hole->from <= last) { 351 last = find_last_bit(fdt->open_fds, punch_hole->from); 352 if (last == punch_hole->from) 353 return NR_OPEN_DEFAULT; 354 } 355 return ALIGN(last + 1, BITS_PER_LONG); 356 } 357 358 /* 359 * Allocate a new descriptor table and copy contents from the passed in 360 * instance. Returns a pointer to cloned table on success, ERR_PTR() 361 * on failure. For 'punch_hole' see sane_fdtable_size(). 362 */ 363 struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_hole) 364 { 365 struct files_struct *newf; 366 struct file **old_fds, **new_fds; 367 unsigned int open_files, i; 368 struct fdtable *old_fdt, *new_fdt; 369 370 newf = kmem_cache_alloc(files_cachep, GFP_KERNEL); 371 if (!newf) 372 return ERR_PTR(-ENOMEM); 373 374 atomic_set(&newf->count, 1); 375 376 spin_lock_init(&newf->file_lock); 377 newf->resize_in_progress = false; 378 init_waitqueue_head(&newf->resize_wait); 379 newf->next_fd = 0; 380 new_fdt = &newf->fdtab; 381 new_fdt->max_fds = NR_OPEN_DEFAULT; 382 new_fdt->close_on_exec = newf->close_on_exec_init; 383 new_fdt->open_fds = newf->open_fds_init; 384 new_fdt->full_fds_bits = newf->full_fds_bits_init; 385 new_fdt->fd = &newf->fd_array[0]; 386 387 spin_lock(&oldf->file_lock); 388 old_fdt = files_fdtable(oldf); 389 open_files = sane_fdtable_size(old_fdt, punch_hole); 390 391 /* 392 * Check whether we need to allocate a larger fd array and fd set. 393 */ 394 while (unlikely(open_files > new_fdt->max_fds)) { 395 spin_unlock(&oldf->file_lock); 396 397 if (new_fdt != &newf->fdtab) 398 __free_fdtable(new_fdt); 399 400 new_fdt = alloc_fdtable(open_files); 401 if (IS_ERR(new_fdt)) { 402 kmem_cache_free(files_cachep, newf); 403 return ERR_CAST(new_fdt); 404 } 405 406 /* 407 * Reacquire the oldf lock and a pointer to its fd table 408 * who knows it may have a new bigger fd table. We need 409 * the latest pointer. 410 */ 411 spin_lock(&oldf->file_lock); 412 old_fdt = files_fdtable(oldf); 413 open_files = sane_fdtable_size(old_fdt, punch_hole); 414 } 415 416 copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG); 417 418 old_fds = old_fdt->fd; 419 new_fds = new_fdt->fd; 420 421 for (i = open_files; i != 0; i--) { 422 struct file *f = *old_fds++; 423 if (f) { 424 get_file(f); 425 } else { 426 /* 427 * The fd may be claimed in the fd bitmap but not yet 428 * instantiated in the files array if a sibling thread 429 * is partway through open(). So make sure that this 430 * fd is available to the new process. 431 */ 432 __clear_open_fd(open_files - i, new_fdt); 433 } 434 rcu_assign_pointer(*new_fds++, f); 435 } 436 spin_unlock(&oldf->file_lock); 437 438 /* clear the remainder */ 439 memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *)); 440 441 rcu_assign_pointer(newf->fdt, new_fdt); 442 443 return newf; 444 } 445 446 static struct fdtable *close_files(struct files_struct * files) 447 { 448 /* 449 * It is safe to dereference the fd table without RCU or 450 * ->file_lock because this is the last reference to the 451 * files structure. 452 */ 453 struct fdtable *fdt = rcu_dereference_raw(files->fdt); 454 unsigned int i, j = 0; 455 456 for (;;) { 457 unsigned long set; 458 i = j * BITS_PER_LONG; 459 if (i >= fdt->max_fds) 460 break; 461 set = fdt->open_fds[j++]; 462 while (set) { 463 if (set & 1) { 464 struct file *file = fdt->fd[i]; 465 if (file) { 466 filp_close(file, files); 467 cond_resched(); 468 } 469 } 470 i++; 471 set >>= 1; 472 } 473 } 474 475 return fdt; 476 } 477 478 void put_files_struct(struct files_struct *files) 479 { 480 if (atomic_dec_and_test(&files->count)) { 481 struct fdtable *fdt = close_files(files); 482 483 /* free the arrays if they are not embedded */ 484 if (fdt != &files->fdtab) 485 __free_fdtable(fdt); 486 kmem_cache_free(files_cachep, files); 487 } 488 } 489 490 void exit_files(struct task_struct *tsk) 491 { 492 struct files_struct * files = tsk->files; 493 494 if (files) { 495 task_lock(tsk); 496 tsk->files = NULL; 497 task_unlock(tsk); 498 put_files_struct(files); 499 } 500 } 501 502 struct files_struct init_files = { 503 .count = ATOMIC_INIT(1), 504 .fdt = &init_files.fdtab, 505 .fdtab = { 506 .max_fds = NR_OPEN_DEFAULT, 507 .fd = &init_files.fd_array[0], 508 .close_on_exec = init_files.close_on_exec_init, 509 .open_fds = init_files.open_fds_init, 510 .full_fds_bits = init_files.full_fds_bits_init, 511 }, 512 .file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock), 513 .resize_wait = __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait), 514 }; 515 516 static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start) 517 { 518 unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */ 519 unsigned int maxbit = maxfd / BITS_PER_LONG; 520 unsigned int bitbit = start / BITS_PER_LONG; 521 unsigned int bit; 522 523 /* 524 * Try to avoid looking at the second level bitmap 525 */ 526 bit = find_next_zero_bit(&fdt->open_fds[bitbit], BITS_PER_LONG, 527 start & (BITS_PER_LONG - 1)); 528 if (bit < BITS_PER_LONG) 529 return bit + bitbit * BITS_PER_LONG; 530 531 bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG; 532 if (bitbit >= maxfd) 533 return maxfd; 534 if (bitbit > start) 535 start = bitbit; 536 return find_next_zero_bit(fdt->open_fds, maxfd, start); 537 } 538 539 /* 540 * allocate a file descriptor, mark it busy. 541 */ 542 static int alloc_fd(unsigned start, unsigned end, unsigned flags) 543 { 544 struct files_struct *files = current->files; 545 unsigned int fd; 546 int error; 547 struct fdtable *fdt; 548 549 spin_lock(&files->file_lock); 550 repeat: 551 fdt = files_fdtable(files); 552 fd = start; 553 if (fd < files->next_fd) 554 fd = files->next_fd; 555 556 if (likely(fd < fdt->max_fds)) 557 fd = find_next_fd(fdt, fd); 558 559 /* 560 * N.B. For clone tasks sharing a files structure, this test 561 * will limit the total number of files that can be opened. 562 */ 563 error = -EMFILE; 564 if (unlikely(fd >= end)) 565 goto out; 566 567 if (unlikely(fd >= fdt->max_fds)) { 568 error = expand_files(files, fd); 569 if (error < 0) 570 goto out; 571 572 goto repeat; 573 } 574 575 if (start <= files->next_fd) 576 files->next_fd = fd + 1; 577 578 __set_open_fd(fd, fdt, flags & O_CLOEXEC); 579 error = fd; 580 581 out: 582 spin_unlock(&files->file_lock); 583 return error; 584 } 585 586 int __get_unused_fd_flags(unsigned flags, unsigned long nofile) 587 { 588 return alloc_fd(0, nofile, flags); 589 } 590 591 int get_unused_fd_flags(unsigned flags) 592 { 593 return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE)); 594 } 595 EXPORT_SYMBOL(get_unused_fd_flags); 596 597 static void __put_unused_fd(struct files_struct *files, unsigned int fd) 598 { 599 struct fdtable *fdt = files_fdtable(files); 600 __clear_open_fd(fd, fdt); 601 if (fd < files->next_fd) 602 files->next_fd = fd; 603 } 604 605 void put_unused_fd(unsigned int fd) 606 { 607 struct files_struct *files = current->files; 608 spin_lock(&files->file_lock); 609 __put_unused_fd(files, fd); 610 spin_unlock(&files->file_lock); 611 } 612 613 EXPORT_SYMBOL(put_unused_fd); 614 615 /* 616 * Install a file pointer in the fd array. 617 * 618 * The VFS is full of places where we drop the files lock between 619 * setting the open_fds bitmap and installing the file in the file 620 * array. At any such point, we are vulnerable to a dup2() race 621 * installing a file in the array before us. We need to detect this and 622 * fput() the struct file we are about to overwrite in this case. 623 * 624 * It should never happen - if we allow dup2() do it, _really_ bad things 625 * will follow. 626 * 627 * This consumes the "file" refcount, so callers should treat it 628 * as if they had called fput(file). 629 */ 630 631 void fd_install(unsigned int fd, struct file *file) 632 { 633 struct files_struct *files = current->files; 634 struct fdtable *fdt; 635 636 if (WARN_ON_ONCE(unlikely(file->f_mode & FMODE_BACKING))) 637 return; 638 639 rcu_read_lock_sched(); 640 641 if (unlikely(files->resize_in_progress)) { 642 rcu_read_unlock_sched(); 643 spin_lock(&files->file_lock); 644 fdt = files_fdtable(files); 645 WARN_ON(fdt->fd[fd] != NULL); 646 rcu_assign_pointer(fdt->fd[fd], file); 647 spin_unlock(&files->file_lock); 648 return; 649 } 650 /* coupled with smp_wmb() in expand_fdtable() */ 651 smp_rmb(); 652 fdt = rcu_dereference_sched(files->fdt); 653 BUG_ON(fdt->fd[fd] != NULL); 654 rcu_assign_pointer(fdt->fd[fd], file); 655 rcu_read_unlock_sched(); 656 } 657 658 EXPORT_SYMBOL(fd_install); 659 660 /** 661 * file_close_fd_locked - return file associated with fd 662 * @files: file struct to retrieve file from 663 * @fd: file descriptor to retrieve file for 664 * 665 * Doesn't take a separate reference count. 666 * 667 * Context: files_lock must be held. 668 * 669 * Returns: The file associated with @fd (NULL if @fd is not open) 670 */ 671 struct file *file_close_fd_locked(struct files_struct *files, unsigned fd) 672 { 673 struct fdtable *fdt = files_fdtable(files); 674 struct file *file; 675 676 lockdep_assert_held(&files->file_lock); 677 678 if (fd >= fdt->max_fds) 679 return NULL; 680 681 fd = array_index_nospec(fd, fdt->max_fds); 682 file = fdt->fd[fd]; 683 if (file) { 684 rcu_assign_pointer(fdt->fd[fd], NULL); 685 __put_unused_fd(files, fd); 686 } 687 return file; 688 } 689 690 int close_fd(unsigned fd) 691 { 692 struct files_struct *files = current->files; 693 struct file *file; 694 695 spin_lock(&files->file_lock); 696 file = file_close_fd_locked(files, fd); 697 spin_unlock(&files->file_lock); 698 if (!file) 699 return -EBADF; 700 701 return filp_close(file, files); 702 } 703 EXPORT_SYMBOL(close_fd); 704 705 /** 706 * last_fd - return last valid index into fd table 707 * @fdt: File descriptor table. 708 * 709 * Context: Either rcu read lock or files_lock must be held. 710 * 711 * Returns: Last valid index into fdtable. 712 */ 713 static inline unsigned last_fd(struct fdtable *fdt) 714 { 715 return fdt->max_fds - 1; 716 } 717 718 static inline void __range_cloexec(struct files_struct *cur_fds, 719 unsigned int fd, unsigned int max_fd) 720 { 721 struct fdtable *fdt; 722 723 /* make sure we're using the correct maximum value */ 724 spin_lock(&cur_fds->file_lock); 725 fdt = files_fdtable(cur_fds); 726 max_fd = min(last_fd(fdt), max_fd); 727 if (fd <= max_fd) 728 bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1); 729 spin_unlock(&cur_fds->file_lock); 730 } 731 732 static inline void __range_close(struct files_struct *files, unsigned int fd, 733 unsigned int max_fd) 734 { 735 struct file *file; 736 unsigned n; 737 738 spin_lock(&files->file_lock); 739 n = last_fd(files_fdtable(files)); 740 max_fd = min(max_fd, n); 741 742 for (; fd <= max_fd; fd++) { 743 file = file_close_fd_locked(files, fd); 744 if (file) { 745 spin_unlock(&files->file_lock); 746 filp_close(file, files); 747 cond_resched(); 748 spin_lock(&files->file_lock); 749 } else if (need_resched()) { 750 spin_unlock(&files->file_lock); 751 cond_resched(); 752 spin_lock(&files->file_lock); 753 } 754 } 755 spin_unlock(&files->file_lock); 756 } 757 758 /** 759 * sys_close_range() - Close all file descriptors in a given range. 760 * 761 * @fd: starting file descriptor to close 762 * @max_fd: last file descriptor to close 763 * @flags: CLOSE_RANGE flags. 764 * 765 * This closes a range of file descriptors. All file descriptors 766 * from @fd up to and including @max_fd are closed. 767 * Currently, errors to close a given file descriptor are ignored. 768 */ 769 SYSCALL_DEFINE3(close_range, unsigned int, fd, unsigned int, max_fd, 770 unsigned int, flags) 771 { 772 struct task_struct *me = current; 773 struct files_struct *cur_fds = me->files, *fds = NULL; 774 775 if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC)) 776 return -EINVAL; 777 778 if (fd > max_fd) 779 return -EINVAL; 780 781 if ((flags & CLOSE_RANGE_UNSHARE) && atomic_read(&cur_fds->count) > 1) { 782 struct fd_range range = {fd, max_fd}, *punch_hole = ⦥ 783 784 /* 785 * If the caller requested all fds to be made cloexec we always 786 * copy all of the file descriptors since they still want to 787 * use them. 788 */ 789 if (flags & CLOSE_RANGE_CLOEXEC) 790 punch_hole = NULL; 791 792 fds = dup_fd(cur_fds, punch_hole); 793 if (IS_ERR(fds)) 794 return PTR_ERR(fds); 795 /* 796 * We used to share our file descriptor table, and have now 797 * created a private one, make sure we're using it below. 798 */ 799 swap(cur_fds, fds); 800 } 801 802 if (flags & CLOSE_RANGE_CLOEXEC) 803 __range_cloexec(cur_fds, fd, max_fd); 804 else 805 __range_close(cur_fds, fd, max_fd); 806 807 if (fds) { 808 /* 809 * We're done closing the files we were supposed to. Time to install 810 * the new file descriptor table and drop the old one. 811 */ 812 task_lock(me); 813 me->files = cur_fds; 814 task_unlock(me); 815 put_files_struct(fds); 816 } 817 818 return 0; 819 } 820 821 /** 822 * file_close_fd - return file associated with fd 823 * @fd: file descriptor to retrieve file for 824 * 825 * Doesn't take a separate reference count. 826 * 827 * Returns: The file associated with @fd (NULL if @fd is not open) 828 */ 829 struct file *file_close_fd(unsigned int fd) 830 { 831 struct files_struct *files = current->files; 832 struct file *file; 833 834 spin_lock(&files->file_lock); 835 file = file_close_fd_locked(files, fd); 836 spin_unlock(&files->file_lock); 837 838 return file; 839 } 840 841 void do_close_on_exec(struct files_struct *files) 842 { 843 unsigned i; 844 struct fdtable *fdt; 845 846 /* exec unshares first */ 847 spin_lock(&files->file_lock); 848 for (i = 0; ; i++) { 849 unsigned long set; 850 unsigned fd = i * BITS_PER_LONG; 851 fdt = files_fdtable(files); 852 if (fd >= fdt->max_fds) 853 break; 854 set = fdt->close_on_exec[i]; 855 if (!set) 856 continue; 857 fdt->close_on_exec[i] = 0; 858 for ( ; set ; fd++, set >>= 1) { 859 struct file *file; 860 if (!(set & 1)) 861 continue; 862 file = fdt->fd[fd]; 863 if (!file) 864 continue; 865 rcu_assign_pointer(fdt->fd[fd], NULL); 866 __put_unused_fd(files, fd); 867 spin_unlock(&files->file_lock); 868 filp_close(file, files); 869 cond_resched(); 870 spin_lock(&files->file_lock); 871 } 872 873 } 874 spin_unlock(&files->file_lock); 875 } 876 877 static struct file *__get_file_rcu(struct file __rcu **f) 878 { 879 struct file __rcu *file; 880 struct file __rcu *file_reloaded; 881 struct file __rcu *file_reloaded_cmp; 882 883 file = rcu_dereference_raw(*f); 884 if (!file) 885 return NULL; 886 887 if (unlikely(!file_ref_get(&file->f_ref))) 888 return ERR_PTR(-EAGAIN); 889 890 file_reloaded = rcu_dereference_raw(*f); 891 892 /* 893 * Ensure that all accesses have a dependency on the load from 894 * rcu_dereference_raw() above so we get correct ordering 895 * between reuse/allocation and the pointer check below. 896 */ 897 file_reloaded_cmp = file_reloaded; 898 OPTIMIZER_HIDE_VAR(file_reloaded_cmp); 899 900 /* 901 * file_ref_get() above provided a full memory barrier when we 902 * acquired a reference. 903 * 904 * This is paired with the write barrier from assigning to the 905 * __rcu protected file pointer so that if that pointer still 906 * matches the current file, we know we have successfully 907 * acquired a reference to the right file. 908 * 909 * If the pointers don't match the file has been reallocated by 910 * SLAB_TYPESAFE_BY_RCU. 911 */ 912 if (file == file_reloaded_cmp) 913 return file_reloaded; 914 915 fput(file); 916 return ERR_PTR(-EAGAIN); 917 } 918 919 /** 920 * get_file_rcu - try go get a reference to a file under rcu 921 * @f: the file to get a reference on 922 * 923 * This function tries to get a reference on @f carefully verifying that 924 * @f hasn't been reused. 925 * 926 * This function should rarely have to be used and only by users who 927 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it. 928 * 929 * Return: Returns @f with the reference count increased or NULL. 930 */ 931 struct file *get_file_rcu(struct file __rcu **f) 932 { 933 for (;;) { 934 struct file __rcu *file; 935 936 file = __get_file_rcu(f); 937 if (!IS_ERR(file)) 938 return file; 939 } 940 } 941 EXPORT_SYMBOL_GPL(get_file_rcu); 942 943 /** 944 * get_file_active - try go get a reference to a file 945 * @f: the file to get a reference on 946 * 947 * In contast to get_file_rcu() the pointer itself isn't part of the 948 * reference counting. 949 * 950 * This function should rarely have to be used and only by users who 951 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it. 952 * 953 * Return: Returns @f with the reference count increased or NULL. 954 */ 955 struct file *get_file_active(struct file **f) 956 { 957 struct file __rcu *file; 958 959 rcu_read_lock(); 960 file = __get_file_rcu(f); 961 rcu_read_unlock(); 962 if (IS_ERR(file)) 963 file = NULL; 964 return file; 965 } 966 EXPORT_SYMBOL_GPL(get_file_active); 967 968 static inline struct file *__fget_files_rcu(struct files_struct *files, 969 unsigned int fd, fmode_t mask) 970 { 971 for (;;) { 972 struct file *file; 973 struct fdtable *fdt = rcu_dereference_raw(files->fdt); 974 struct file __rcu **fdentry; 975 unsigned long nospec_mask; 976 977 /* Mask is a 0 for invalid fd's, ~0 for valid ones */ 978 nospec_mask = array_index_mask_nospec(fd, fdt->max_fds); 979 980 /* 981 * fdentry points to the 'fd' offset, or fdt->fd[0]. 982 * Loading from fdt->fd[0] is always safe, because the 983 * array always exists. 984 */ 985 fdentry = fdt->fd + (fd & nospec_mask); 986 987 /* Do the load, then mask any invalid result */ 988 file = rcu_dereference_raw(*fdentry); 989 file = (void *)(nospec_mask & (unsigned long)file); 990 if (unlikely(!file)) 991 return NULL; 992 993 /* 994 * Ok, we have a file pointer that was valid at 995 * some point, but it might have become stale since. 996 * 997 * We need to confirm it by incrementing the refcount 998 * and then check the lookup again. 999 * 1000 * file_ref_get() gives us a full memory barrier. We 1001 * only really need an 'acquire' one to protect the 1002 * loads below, but we don't have that. 1003 */ 1004 if (unlikely(!file_ref_get(&file->f_ref))) 1005 continue; 1006 1007 /* 1008 * Such a race can take two forms: 1009 * 1010 * (a) the file ref already went down to zero and the 1011 * file hasn't been reused yet or the file count 1012 * isn't zero but the file has already been reused. 1013 * 1014 * (b) the file table entry has changed under us. 1015 * Note that we don't need to re-check the 'fdt->fd' 1016 * pointer having changed, because it always goes 1017 * hand-in-hand with 'fdt'. 1018 * 1019 * If so, we need to put our ref and try again. 1020 */ 1021 if (unlikely(file != rcu_dereference_raw(*fdentry)) || 1022 unlikely(rcu_dereference_raw(files->fdt) != fdt)) { 1023 fput(file); 1024 continue; 1025 } 1026 1027 /* 1028 * This isn't the file we're looking for or we're not 1029 * allowed to get a reference to it. 1030 */ 1031 if (unlikely(file->f_mode & mask)) { 1032 fput(file); 1033 return NULL; 1034 } 1035 1036 /* 1037 * Ok, we have a ref to the file, and checked that it 1038 * still exists. 1039 */ 1040 return file; 1041 } 1042 } 1043 1044 static struct file *__fget_files(struct files_struct *files, unsigned int fd, 1045 fmode_t mask) 1046 { 1047 struct file *file; 1048 1049 rcu_read_lock(); 1050 file = __fget_files_rcu(files, fd, mask); 1051 rcu_read_unlock(); 1052 1053 return file; 1054 } 1055 1056 static inline struct file *__fget(unsigned int fd, fmode_t mask) 1057 { 1058 return __fget_files(current->files, fd, mask); 1059 } 1060 1061 struct file *fget(unsigned int fd) 1062 { 1063 return __fget(fd, FMODE_PATH); 1064 } 1065 EXPORT_SYMBOL(fget); 1066 1067 struct file *fget_raw(unsigned int fd) 1068 { 1069 return __fget(fd, 0); 1070 } 1071 EXPORT_SYMBOL(fget_raw); 1072 1073 struct file *fget_task(struct task_struct *task, unsigned int fd) 1074 { 1075 struct file *file = NULL; 1076 1077 task_lock(task); 1078 if (task->files) 1079 file = __fget_files(task->files, fd, 0); 1080 task_unlock(task); 1081 1082 return file; 1083 } 1084 1085 struct file *fget_task_next(struct task_struct *task, unsigned int *ret_fd) 1086 { 1087 /* Must be called with rcu_read_lock held */ 1088 struct files_struct *files; 1089 unsigned int fd = *ret_fd; 1090 struct file *file = NULL; 1091 1092 task_lock(task); 1093 files = task->files; 1094 if (files) { 1095 rcu_read_lock(); 1096 for (; fd < files_fdtable(files)->max_fds; fd++) { 1097 file = __fget_files_rcu(files, fd, 0); 1098 if (file) 1099 break; 1100 } 1101 rcu_read_unlock(); 1102 } 1103 task_unlock(task); 1104 *ret_fd = fd; 1105 return file; 1106 } 1107 EXPORT_SYMBOL(fget_task_next); 1108 1109 /* 1110 * Lightweight file lookup - no refcnt increment if fd table isn't shared. 1111 * 1112 * You can use this instead of fget if you satisfy all of the following 1113 * conditions: 1114 * 1) You must call fput_light before exiting the syscall and returning control 1115 * to userspace (i.e. you cannot remember the returned struct file * after 1116 * returning to userspace). 1117 * 2) You must not call filp_close on the returned struct file * in between 1118 * calls to fget_light and fput_light. 1119 * 3) You must not clone the current task in between the calls to fget_light 1120 * and fput_light. 1121 * 1122 * The fput_needed flag returned by fget_light should be passed to the 1123 * corresponding fput_light. 1124 * 1125 * (As an exception to rule 2, you can call filp_close between fget_light and 1126 * fput_light provided that you capture a real refcount with get_file before 1127 * the call to filp_close, and ensure that this real refcount is fput *after* 1128 * the fput_light call.) 1129 * 1130 * See also the documentation in rust/kernel/file.rs. 1131 */ 1132 static inline struct fd __fget_light(unsigned int fd, fmode_t mask) 1133 { 1134 struct files_struct *files = current->files; 1135 struct file *file; 1136 1137 /* 1138 * If another thread is concurrently calling close_fd() followed 1139 * by put_files_struct(), we must not observe the old table 1140 * entry combined with the new refcount - otherwise we could 1141 * return a file that is concurrently being freed. 1142 * 1143 * atomic_read_acquire() pairs with atomic_dec_and_test() in 1144 * put_files_struct(). 1145 */ 1146 if (likely(atomic_read_acquire(&files->count) == 1)) { 1147 file = files_lookup_fd_raw(files, fd); 1148 if (!file || unlikely(file->f_mode & mask)) 1149 return EMPTY_FD; 1150 return BORROWED_FD(file); 1151 } else { 1152 file = __fget_files(files, fd, mask); 1153 if (!file) 1154 return EMPTY_FD; 1155 return CLONED_FD(file); 1156 } 1157 } 1158 struct fd fdget(unsigned int fd) 1159 { 1160 return __fget_light(fd, FMODE_PATH); 1161 } 1162 EXPORT_SYMBOL(fdget); 1163 1164 struct fd fdget_raw(unsigned int fd) 1165 { 1166 return __fget_light(fd, 0); 1167 } 1168 1169 /* 1170 * Try to avoid f_pos locking. We only need it if the 1171 * file is marked for FMODE_ATOMIC_POS, and it can be 1172 * accessed multiple ways. 1173 * 1174 * Always do it for directories, because pidfd_getfd() 1175 * can make a file accessible even if it otherwise would 1176 * not be, and for directories this is a correctness 1177 * issue, not a "POSIX requirement". 1178 */ 1179 static inline bool file_needs_f_pos_lock(struct file *file) 1180 { 1181 return (file->f_mode & FMODE_ATOMIC_POS) && 1182 (file_count(file) > 1 || file->f_op->iterate_shared); 1183 } 1184 1185 struct fd fdget_pos(unsigned int fd) 1186 { 1187 struct fd f = fdget(fd); 1188 struct file *file = fd_file(f); 1189 1190 if (file && file_needs_f_pos_lock(file)) { 1191 f.word |= FDPUT_POS_UNLOCK; 1192 mutex_lock(&file->f_pos_lock); 1193 } 1194 return f; 1195 } 1196 1197 void __f_unlock_pos(struct file *f) 1198 { 1199 mutex_unlock(&f->f_pos_lock); 1200 } 1201 1202 /* 1203 * We only lock f_pos if we have threads or if the file might be 1204 * shared with another process. In both cases we'll have an elevated 1205 * file count (done either by fdget() or by fork()). 1206 */ 1207 1208 void set_close_on_exec(unsigned int fd, int flag) 1209 { 1210 struct files_struct *files = current->files; 1211 spin_lock(&files->file_lock); 1212 __set_close_on_exec(fd, files_fdtable(files), flag); 1213 spin_unlock(&files->file_lock); 1214 } 1215 1216 bool get_close_on_exec(unsigned int fd) 1217 { 1218 bool res; 1219 rcu_read_lock(); 1220 res = close_on_exec(fd, current->files); 1221 rcu_read_unlock(); 1222 return res; 1223 } 1224 1225 static int do_dup2(struct files_struct *files, 1226 struct file *file, unsigned fd, unsigned flags) 1227 __releases(&files->file_lock) 1228 { 1229 struct file *tofree; 1230 struct fdtable *fdt; 1231 1232 /* 1233 * We need to detect attempts to do dup2() over allocated but still 1234 * not finished descriptor. NB: OpenBSD avoids that at the price of 1235 * extra work in their equivalent of fget() - they insert struct 1236 * file immediately after grabbing descriptor, mark it larval if 1237 * more work (e.g. actual opening) is needed and make sure that 1238 * fget() treats larval files as absent. Potentially interesting, 1239 * but while extra work in fget() is trivial, locking implications 1240 * and amount of surgery on open()-related paths in VFS are not. 1241 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution" 1242 * deadlocks in rather amusing ways, AFAICS. All of that is out of 1243 * scope of POSIX or SUS, since neither considers shared descriptor 1244 * tables and this condition does not arise without those. 1245 */ 1246 fdt = files_fdtable(files); 1247 fd = array_index_nospec(fd, fdt->max_fds); 1248 tofree = fdt->fd[fd]; 1249 if (!tofree && fd_is_open(fd, fdt)) 1250 goto Ebusy; 1251 get_file(file); 1252 rcu_assign_pointer(fdt->fd[fd], file); 1253 __set_open_fd(fd, fdt, flags & O_CLOEXEC); 1254 spin_unlock(&files->file_lock); 1255 1256 if (tofree) 1257 filp_close(tofree, files); 1258 1259 return fd; 1260 1261 Ebusy: 1262 spin_unlock(&files->file_lock); 1263 return -EBUSY; 1264 } 1265 1266 int replace_fd(unsigned fd, struct file *file, unsigned flags) 1267 { 1268 int err; 1269 struct files_struct *files = current->files; 1270 1271 if (!file) 1272 return close_fd(fd); 1273 1274 if (fd >= rlimit(RLIMIT_NOFILE)) 1275 return -EBADF; 1276 1277 spin_lock(&files->file_lock); 1278 err = expand_files(files, fd); 1279 if (unlikely(err < 0)) 1280 goto out_unlock; 1281 return do_dup2(files, file, fd, flags); 1282 1283 out_unlock: 1284 spin_unlock(&files->file_lock); 1285 return err; 1286 } 1287 1288 /** 1289 * receive_fd() - Install received file into file descriptor table 1290 * @file: struct file that was received from another process 1291 * @ufd: __user pointer to write new fd number to 1292 * @o_flags: the O_* flags to apply to the new fd entry 1293 * 1294 * Installs a received file into the file descriptor table, with appropriate 1295 * checks and count updates. Optionally writes the fd number to userspace, if 1296 * @ufd is non-NULL. 1297 * 1298 * This helper handles its own reference counting of the incoming 1299 * struct file. 1300 * 1301 * Returns newly install fd or -ve on error. 1302 */ 1303 int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags) 1304 { 1305 int new_fd; 1306 int error; 1307 1308 error = security_file_receive(file); 1309 if (error) 1310 return error; 1311 1312 new_fd = get_unused_fd_flags(o_flags); 1313 if (new_fd < 0) 1314 return new_fd; 1315 1316 if (ufd) { 1317 error = put_user(new_fd, ufd); 1318 if (error) { 1319 put_unused_fd(new_fd); 1320 return error; 1321 } 1322 } 1323 1324 fd_install(new_fd, get_file(file)); 1325 __receive_sock(file); 1326 return new_fd; 1327 } 1328 EXPORT_SYMBOL_GPL(receive_fd); 1329 1330 int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags) 1331 { 1332 int error; 1333 1334 error = security_file_receive(file); 1335 if (error) 1336 return error; 1337 error = replace_fd(new_fd, file, o_flags); 1338 if (error) 1339 return error; 1340 __receive_sock(file); 1341 return new_fd; 1342 } 1343 1344 static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags) 1345 { 1346 int err = -EBADF; 1347 struct file *file; 1348 struct files_struct *files = current->files; 1349 1350 if ((flags & ~O_CLOEXEC) != 0) 1351 return -EINVAL; 1352 1353 if (unlikely(oldfd == newfd)) 1354 return -EINVAL; 1355 1356 if (newfd >= rlimit(RLIMIT_NOFILE)) 1357 return -EBADF; 1358 1359 spin_lock(&files->file_lock); 1360 err = expand_files(files, newfd); 1361 file = files_lookup_fd_locked(files, oldfd); 1362 if (unlikely(!file)) 1363 goto Ebadf; 1364 if (unlikely(err < 0)) { 1365 if (err == -EMFILE) 1366 goto Ebadf; 1367 goto out_unlock; 1368 } 1369 return do_dup2(files, file, newfd, flags); 1370 1371 Ebadf: 1372 err = -EBADF; 1373 out_unlock: 1374 spin_unlock(&files->file_lock); 1375 return err; 1376 } 1377 1378 SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags) 1379 { 1380 return ksys_dup3(oldfd, newfd, flags); 1381 } 1382 1383 SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd) 1384 { 1385 if (unlikely(newfd == oldfd)) { /* corner case */ 1386 struct files_struct *files = current->files; 1387 struct file *f; 1388 int retval = oldfd; 1389 1390 rcu_read_lock(); 1391 f = __fget_files_rcu(files, oldfd, 0); 1392 if (!f) 1393 retval = -EBADF; 1394 rcu_read_unlock(); 1395 if (f) 1396 fput(f); 1397 return retval; 1398 } 1399 return ksys_dup3(oldfd, newfd, 0); 1400 } 1401 1402 SYSCALL_DEFINE1(dup, unsigned int, fildes) 1403 { 1404 int ret = -EBADF; 1405 struct file *file = fget_raw(fildes); 1406 1407 if (file) { 1408 ret = get_unused_fd_flags(0); 1409 if (ret >= 0) 1410 fd_install(ret, file); 1411 else 1412 fput(file); 1413 } 1414 return ret; 1415 } 1416 1417 int f_dupfd(unsigned int from, struct file *file, unsigned flags) 1418 { 1419 unsigned long nofile = rlimit(RLIMIT_NOFILE); 1420 int err; 1421 if (from >= nofile) 1422 return -EINVAL; 1423 err = alloc_fd(from, nofile, flags); 1424 if (err >= 0) { 1425 get_file(file); 1426 fd_install(err, file); 1427 } 1428 return err; 1429 } 1430 1431 int iterate_fd(struct files_struct *files, unsigned n, 1432 int (*f)(const void *, struct file *, unsigned), 1433 const void *p) 1434 { 1435 struct fdtable *fdt; 1436 int res = 0; 1437 if (!files) 1438 return 0; 1439 spin_lock(&files->file_lock); 1440 for (fdt = files_fdtable(files); n < fdt->max_fds; n++) { 1441 struct file *file; 1442 file = rcu_dereference_check_fdtable(files, fdt->fd[n]); 1443 if (!file) 1444 continue; 1445 res = f(p, file, n); 1446 if (res) 1447 break; 1448 } 1449 spin_unlock(&files->file_lock); 1450 return res; 1451 } 1452 EXPORT_SYMBOL(iterate_fd); 1453