1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * linux/fs/file.c 4 * 5 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes 6 * 7 * Manage the dynamic fd arrays in the process files_struct. 8 */ 9 10 #include <linux/syscalls.h> 11 #include <linux/export.h> 12 #include <linux/fs.h> 13 #include <linux/kernel.h> 14 #include <linux/mm.h> 15 #include <linux/sched/signal.h> 16 #include <linux/slab.h> 17 #include <linux/file.h> 18 #include <linux/fdtable.h> 19 #include <linux/bitops.h> 20 #include <linux/spinlock.h> 21 #include <linux/rcupdate.h> 22 #include <linux/close_range.h> 23 #include <linux/file_ref.h> 24 #include <net/sock.h> 25 #include <linux/init_task.h> 26 27 #include "internal.h" 28 29 /** 30 * __file_ref_put - Slowpath of file_ref_put() 31 * @ref: Pointer to the reference count 32 * @cnt: Current reference count 33 * 34 * Invoked when the reference count is outside of the valid zone. 35 * 36 * Return: 37 * True if this was the last reference with no future references 38 * possible. This signals the caller that it can safely schedule the 39 * object, which is protected by the reference counter, for 40 * deconstruction. 41 * 42 * False if there are still active references or the put() raced 43 * with a concurrent get()/put() pair. Caller is not allowed to 44 * deconstruct the protected object. 45 */ 46 bool __file_ref_put(file_ref_t *ref, unsigned long cnt) 47 { 48 /* Did this drop the last reference? */ 49 if (likely(cnt == FILE_REF_NOREF)) { 50 /* 51 * Carefully try to set the reference count to FILE_REF_DEAD. 52 * 53 * This can fail if a concurrent get() operation has 54 * elevated it again or the corresponding put() even marked 55 * it dead already. Both are valid situations and do not 56 * require a retry. If this fails the caller is not 57 * allowed to deconstruct the object. 58 */ 59 if (!atomic_long_try_cmpxchg_release(&ref->refcnt, &cnt, FILE_REF_DEAD)) 60 return false; 61 62 /* 63 * The caller can safely schedule the object for 64 * deconstruction. Provide acquire ordering. 65 */ 66 smp_acquire__after_ctrl_dep(); 67 return true; 68 } 69 70 /* 71 * If the reference count was already in the dead zone, then this 72 * put() operation is imbalanced. Warn, put the reference count back to 73 * DEAD and tell the caller to not deconstruct the object. 74 */ 75 if (WARN_ONCE(cnt >= FILE_REF_RELEASED, "imbalanced put on file reference count")) { 76 atomic_long_set(&ref->refcnt, FILE_REF_DEAD); 77 return false; 78 } 79 80 /* 81 * This is a put() operation on a saturated refcount. Restore the 82 * mean saturation value and tell the caller to not deconstruct the 83 * object. 84 */ 85 if (cnt > FILE_REF_MAXREF) 86 atomic_long_set(&ref->refcnt, FILE_REF_SATURATED); 87 return false; 88 } 89 EXPORT_SYMBOL_GPL(__file_ref_put); 90 91 unsigned int sysctl_nr_open __read_mostly = 1024*1024; 92 unsigned int sysctl_nr_open_min = BITS_PER_LONG; 93 /* our min() is unusable in constant expressions ;-/ */ 94 #define __const_min(x, y) ((x) < (y) ? (x) : (y)) 95 unsigned int sysctl_nr_open_max = 96 __const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG; 97 98 static void __free_fdtable(struct fdtable *fdt) 99 { 100 kvfree(fdt->fd); 101 kvfree(fdt->open_fds); 102 kfree(fdt); 103 } 104 105 static void free_fdtable_rcu(struct rcu_head *rcu) 106 { 107 __free_fdtable(container_of(rcu, struct fdtable, rcu)); 108 } 109 110 #define BITBIT_NR(nr) BITS_TO_LONGS(BITS_TO_LONGS(nr)) 111 #define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long)) 112 113 #define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds 114 /* 115 * Copy 'count' fd bits from the old table to the new table and clear the extra 116 * space if any. This does not copy the file pointers. Called with the files 117 * spinlock held for write. 118 */ 119 static inline void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt, 120 unsigned int copy_words) 121 { 122 unsigned int nwords = fdt_words(nfdt); 123 124 bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds, 125 copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG); 126 bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec, 127 copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG); 128 bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits, 129 copy_words, nwords); 130 } 131 132 /* 133 * Copy all file descriptors from the old table to the new, expanded table and 134 * clear the extra space. Called with the files spinlock held for write. 135 */ 136 static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt) 137 { 138 size_t cpy, set; 139 140 BUG_ON(nfdt->max_fds < ofdt->max_fds); 141 142 cpy = ofdt->max_fds * sizeof(struct file *); 143 set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *); 144 memcpy(nfdt->fd, ofdt->fd, cpy); 145 memset((char *)nfdt->fd + cpy, 0, set); 146 147 copy_fd_bitmaps(nfdt, ofdt, fdt_words(ofdt)); 148 } 149 150 /* 151 * Note how the fdtable bitmap allocations very much have to be a multiple of 152 * BITS_PER_LONG. This is not only because we walk those things in chunks of 153 * 'unsigned long' in some places, but simply because that is how the Linux 154 * kernel bitmaps are defined to work: they are not "bits in an array of bytes", 155 * they are very much "bits in an array of unsigned long". 156 */ 157 static struct fdtable *alloc_fdtable(unsigned int slots_wanted) 158 { 159 struct fdtable *fdt; 160 unsigned int nr; 161 void *data; 162 163 /* 164 * Figure out how many fds we actually want to support in this fdtable. 165 * Allocation steps are keyed to the size of the fdarray, since it 166 * grows far faster than any of the other dynamic data. We try to fit 167 * the fdarray into comfortable page-tuned chunks: starting at 1024B 168 * and growing in powers of two from there on. Since we called only 169 * with slots_wanted > BITS_PER_LONG (embedded instance in files->fdtab 170 * already gives BITS_PER_LONG slots), the above boils down to 171 * 1. use the smallest power of two large enough to give us that many 172 * slots. 173 * 2. on 32bit skip 64 and 128 - the minimal capacity we want there is 174 * 256 slots (i.e. 1Kb fd array). 175 * 3. on 64bit don't skip anything, 1Kb fd array means 128 slots there 176 * and we are never going to be asked for 64 or less. 177 */ 178 if (IS_ENABLED(CONFIG_32BIT) && slots_wanted < 256) 179 nr = 256; 180 else 181 nr = roundup_pow_of_two(slots_wanted); 182 /* 183 * Note that this can drive nr *below* what we had passed if sysctl_nr_open 184 * had been set lower between the check in expand_files() and here. 185 * 186 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise 187 * bitmaps handling below becomes unpleasant, to put it mildly... 188 */ 189 if (unlikely(nr > sysctl_nr_open)) { 190 nr = round_down(sysctl_nr_open, BITS_PER_LONG); 191 if (nr < slots_wanted) 192 return ERR_PTR(-EMFILE); 193 } 194 195 fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT); 196 if (!fdt) 197 goto out; 198 fdt->max_fds = nr; 199 data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT); 200 if (!data) 201 goto out_fdt; 202 fdt->fd = data; 203 204 data = kvmalloc(max_t(size_t, 205 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES), 206 GFP_KERNEL_ACCOUNT); 207 if (!data) 208 goto out_arr; 209 fdt->open_fds = data; 210 data += nr / BITS_PER_BYTE; 211 fdt->close_on_exec = data; 212 data += nr / BITS_PER_BYTE; 213 fdt->full_fds_bits = data; 214 215 return fdt; 216 217 out_arr: 218 kvfree(fdt->fd); 219 out_fdt: 220 kfree(fdt); 221 out: 222 return ERR_PTR(-ENOMEM); 223 } 224 225 /* 226 * Expand the file descriptor table. 227 * This function will allocate a new fdtable and both fd array and fdset, of 228 * the given size. 229 * Return <0 error code on error; 0 on successful completion. 230 * The files->file_lock should be held on entry, and will be held on exit. 231 */ 232 static int expand_fdtable(struct files_struct *files, unsigned int nr) 233 __releases(files->file_lock) 234 __acquires(files->file_lock) 235 { 236 struct fdtable *new_fdt, *cur_fdt; 237 238 spin_unlock(&files->file_lock); 239 new_fdt = alloc_fdtable(nr + 1); 240 241 /* make sure all fd_install() have seen resize_in_progress 242 * or have finished their rcu_read_lock_sched() section. 243 */ 244 if (atomic_read(&files->count) > 1) 245 synchronize_rcu(); 246 247 spin_lock(&files->file_lock); 248 if (IS_ERR(new_fdt)) 249 return PTR_ERR(new_fdt); 250 cur_fdt = files_fdtable(files); 251 BUG_ON(nr < cur_fdt->max_fds); 252 copy_fdtable(new_fdt, cur_fdt); 253 rcu_assign_pointer(files->fdt, new_fdt); 254 if (cur_fdt != &files->fdtab) 255 call_rcu(&cur_fdt->rcu, free_fdtable_rcu); 256 /* coupled with smp_rmb() in fd_install() */ 257 smp_wmb(); 258 return 0; 259 } 260 261 /* 262 * Expand files. 263 * This function will expand the file structures, if the requested size exceeds 264 * the current capacity and there is room for expansion. 265 * Return <0 error code on error; 0 on success. 266 * The files->file_lock should be held on entry, and will be held on exit. 267 */ 268 static int expand_files(struct files_struct *files, unsigned int nr) 269 __releases(files->file_lock) 270 __acquires(files->file_lock) 271 { 272 struct fdtable *fdt; 273 int error; 274 275 repeat: 276 fdt = files_fdtable(files); 277 278 /* Do we need to expand? */ 279 if (nr < fdt->max_fds) 280 return 0; 281 282 if (unlikely(files->resize_in_progress)) { 283 spin_unlock(&files->file_lock); 284 wait_event(files->resize_wait, !files->resize_in_progress); 285 spin_lock(&files->file_lock); 286 goto repeat; 287 } 288 289 /* Can we expand? */ 290 if (unlikely(nr >= sysctl_nr_open)) 291 return -EMFILE; 292 293 /* All good, so we try */ 294 files->resize_in_progress = true; 295 error = expand_fdtable(files, nr); 296 files->resize_in_progress = false; 297 298 wake_up_all(&files->resize_wait); 299 return error; 300 } 301 302 static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt, 303 bool set) 304 { 305 if (set) { 306 __set_bit(fd, fdt->close_on_exec); 307 } else { 308 if (test_bit(fd, fdt->close_on_exec)) 309 __clear_bit(fd, fdt->close_on_exec); 310 } 311 } 312 313 static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt, bool set) 314 { 315 __set_bit(fd, fdt->open_fds); 316 __set_close_on_exec(fd, fdt, set); 317 fd /= BITS_PER_LONG; 318 if (!~fdt->open_fds[fd]) 319 __set_bit(fd, fdt->full_fds_bits); 320 } 321 322 static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt) 323 { 324 __clear_bit(fd, fdt->open_fds); 325 fd /= BITS_PER_LONG; 326 if (test_bit(fd, fdt->full_fds_bits)) 327 __clear_bit(fd, fdt->full_fds_bits); 328 } 329 330 static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt) 331 { 332 return test_bit(fd, fdt->open_fds); 333 } 334 335 /* 336 * Note that a sane fdtable size always has to be a multiple of 337 * BITS_PER_LONG, since we have bitmaps that are sized by this. 338 * 339 * punch_hole is optional - when close_range() is asked to unshare 340 * and close, we don't need to copy descriptors in that range, so 341 * a smaller cloned descriptor table might suffice if the last 342 * currently opened descriptor falls into that range. 343 */ 344 static unsigned int sane_fdtable_size(struct fdtable *fdt, struct fd_range *punch_hole) 345 { 346 unsigned int last = find_last_bit(fdt->open_fds, fdt->max_fds); 347 348 if (last == fdt->max_fds) 349 return NR_OPEN_DEFAULT; 350 if (punch_hole && punch_hole->to >= last && punch_hole->from <= last) { 351 last = find_last_bit(fdt->open_fds, punch_hole->from); 352 if (last == punch_hole->from) 353 return NR_OPEN_DEFAULT; 354 } 355 return ALIGN(last + 1, BITS_PER_LONG); 356 } 357 358 /* 359 * Allocate a new descriptor table and copy contents from the passed in 360 * instance. Returns a pointer to cloned table on success, ERR_PTR() 361 * on failure. For 'punch_hole' see sane_fdtable_size(). 362 */ 363 struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_hole) 364 { 365 struct files_struct *newf; 366 struct file **old_fds, **new_fds; 367 unsigned int open_files, i; 368 struct fdtable *old_fdt, *new_fdt; 369 370 newf = kmem_cache_alloc(files_cachep, GFP_KERNEL); 371 if (!newf) 372 return ERR_PTR(-ENOMEM); 373 374 atomic_set(&newf->count, 1); 375 376 spin_lock_init(&newf->file_lock); 377 newf->resize_in_progress = false; 378 init_waitqueue_head(&newf->resize_wait); 379 newf->next_fd = 0; 380 new_fdt = &newf->fdtab; 381 new_fdt->max_fds = NR_OPEN_DEFAULT; 382 new_fdt->close_on_exec = newf->close_on_exec_init; 383 new_fdt->open_fds = newf->open_fds_init; 384 new_fdt->full_fds_bits = newf->full_fds_bits_init; 385 new_fdt->fd = &newf->fd_array[0]; 386 387 spin_lock(&oldf->file_lock); 388 old_fdt = files_fdtable(oldf); 389 open_files = sane_fdtable_size(old_fdt, punch_hole); 390 391 /* 392 * Check whether we need to allocate a larger fd array and fd set. 393 */ 394 while (unlikely(open_files > new_fdt->max_fds)) { 395 spin_unlock(&oldf->file_lock); 396 397 if (new_fdt != &newf->fdtab) 398 __free_fdtable(new_fdt); 399 400 new_fdt = alloc_fdtable(open_files); 401 if (IS_ERR(new_fdt)) { 402 kmem_cache_free(files_cachep, newf); 403 return ERR_CAST(new_fdt); 404 } 405 406 /* 407 * Reacquire the oldf lock and a pointer to its fd table 408 * who knows it may have a new bigger fd table. We need 409 * the latest pointer. 410 */ 411 spin_lock(&oldf->file_lock); 412 old_fdt = files_fdtable(oldf); 413 open_files = sane_fdtable_size(old_fdt, punch_hole); 414 } 415 416 copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG); 417 418 old_fds = old_fdt->fd; 419 new_fds = new_fdt->fd; 420 421 /* 422 * We may be racing against fd allocation from other threads using this 423 * files_struct, despite holding ->file_lock. 424 * 425 * alloc_fd() might have already claimed a slot, while fd_install() 426 * did not populate it yet. Note the latter operates locklessly, so 427 * the file can show up as we are walking the array below. 428 * 429 * At the same time we know no files will disappear as all other 430 * operations take the lock. 431 * 432 * Instead of trying to placate userspace racing with itself, we 433 * ref the file if we see it and mark the fd slot as unused otherwise. 434 */ 435 for (i = open_files; i != 0; i--) { 436 struct file *f = rcu_dereference_raw(*old_fds++); 437 if (f) { 438 get_file(f); 439 } else { 440 __clear_open_fd(open_files - i, new_fdt); 441 } 442 rcu_assign_pointer(*new_fds++, f); 443 } 444 spin_unlock(&oldf->file_lock); 445 446 /* clear the remainder */ 447 memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *)); 448 449 rcu_assign_pointer(newf->fdt, new_fdt); 450 451 return newf; 452 } 453 454 static struct fdtable *close_files(struct files_struct * files) 455 { 456 /* 457 * It is safe to dereference the fd table without RCU or 458 * ->file_lock because this is the last reference to the 459 * files structure. 460 */ 461 struct fdtable *fdt = rcu_dereference_raw(files->fdt); 462 unsigned int i, j = 0; 463 464 for (;;) { 465 unsigned long set; 466 i = j * BITS_PER_LONG; 467 if (i >= fdt->max_fds) 468 break; 469 set = fdt->open_fds[j++]; 470 while (set) { 471 if (set & 1) { 472 struct file *file = fdt->fd[i]; 473 if (file) { 474 filp_close(file, files); 475 cond_resched(); 476 } 477 } 478 i++; 479 set >>= 1; 480 } 481 } 482 483 return fdt; 484 } 485 486 void put_files_struct(struct files_struct *files) 487 { 488 if (atomic_dec_and_test(&files->count)) { 489 struct fdtable *fdt = close_files(files); 490 491 /* free the arrays if they are not embedded */ 492 if (fdt != &files->fdtab) 493 __free_fdtable(fdt); 494 kmem_cache_free(files_cachep, files); 495 } 496 } 497 498 void exit_files(struct task_struct *tsk) 499 { 500 struct files_struct * files = tsk->files; 501 502 if (files) { 503 task_lock(tsk); 504 tsk->files = NULL; 505 task_unlock(tsk); 506 put_files_struct(files); 507 } 508 } 509 510 struct files_struct init_files = { 511 .count = ATOMIC_INIT(1), 512 .fdt = &init_files.fdtab, 513 .fdtab = { 514 .max_fds = NR_OPEN_DEFAULT, 515 .fd = &init_files.fd_array[0], 516 .close_on_exec = init_files.close_on_exec_init, 517 .open_fds = init_files.open_fds_init, 518 .full_fds_bits = init_files.full_fds_bits_init, 519 }, 520 .file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock), 521 .resize_wait = __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait), 522 }; 523 524 static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start) 525 { 526 unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */ 527 unsigned int maxbit = maxfd / BITS_PER_LONG; 528 unsigned int bitbit = start / BITS_PER_LONG; 529 unsigned int bit; 530 531 /* 532 * Try to avoid looking at the second level bitmap 533 */ 534 bit = find_next_zero_bit(&fdt->open_fds[bitbit], BITS_PER_LONG, 535 start & (BITS_PER_LONG - 1)); 536 if (bit < BITS_PER_LONG) 537 return bit + bitbit * BITS_PER_LONG; 538 539 bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG; 540 if (bitbit >= maxfd) 541 return maxfd; 542 if (bitbit > start) 543 start = bitbit; 544 return find_next_zero_bit(fdt->open_fds, maxfd, start); 545 } 546 547 /* 548 * allocate a file descriptor, mark it busy. 549 */ 550 static int alloc_fd(unsigned start, unsigned end, unsigned flags) 551 { 552 struct files_struct *files = current->files; 553 unsigned int fd; 554 int error; 555 struct fdtable *fdt; 556 557 spin_lock(&files->file_lock); 558 repeat: 559 fdt = files_fdtable(files); 560 fd = start; 561 if (fd < files->next_fd) 562 fd = files->next_fd; 563 564 if (likely(fd < fdt->max_fds)) 565 fd = find_next_fd(fdt, fd); 566 567 /* 568 * N.B. For clone tasks sharing a files structure, this test 569 * will limit the total number of files that can be opened. 570 */ 571 error = -EMFILE; 572 if (unlikely(fd >= end)) 573 goto out; 574 575 if (unlikely(fd >= fdt->max_fds)) { 576 error = expand_files(files, fd); 577 if (error < 0) 578 goto out; 579 580 goto repeat; 581 } 582 583 if (start <= files->next_fd) 584 files->next_fd = fd + 1; 585 586 __set_open_fd(fd, fdt, flags & O_CLOEXEC); 587 error = fd; 588 VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL); 589 590 out: 591 spin_unlock(&files->file_lock); 592 return error; 593 } 594 595 int __get_unused_fd_flags(unsigned flags, unsigned long nofile) 596 { 597 return alloc_fd(0, nofile, flags); 598 } 599 600 int get_unused_fd_flags(unsigned flags) 601 { 602 return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE)); 603 } 604 EXPORT_SYMBOL(get_unused_fd_flags); 605 606 static void __put_unused_fd(struct files_struct *files, unsigned int fd) 607 { 608 struct fdtable *fdt = files_fdtable(files); 609 __clear_open_fd(fd, fdt); 610 if (fd < files->next_fd) 611 files->next_fd = fd; 612 } 613 614 void put_unused_fd(unsigned int fd) 615 { 616 struct files_struct *files = current->files; 617 spin_lock(&files->file_lock); 618 __put_unused_fd(files, fd); 619 spin_unlock(&files->file_lock); 620 } 621 622 EXPORT_SYMBOL(put_unused_fd); 623 624 /** 625 * fd_install - install a file pointer in the fd array 626 * @fd: file descriptor to install the file in 627 * @file: the file to install 628 * 629 * This consumes the "file" refcount, so callers should treat it 630 * as if they had called fput(file). 631 */ 632 void fd_install(unsigned int fd, struct file *file) 633 { 634 struct files_struct *files = current->files; 635 struct fdtable *fdt; 636 637 if (WARN_ON_ONCE(unlikely(file->f_mode & FMODE_BACKING))) 638 return; 639 640 rcu_read_lock_sched(); 641 642 if (unlikely(files->resize_in_progress)) { 643 rcu_read_unlock_sched(); 644 spin_lock(&files->file_lock); 645 fdt = files_fdtable(files); 646 VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL); 647 rcu_assign_pointer(fdt->fd[fd], file); 648 spin_unlock(&files->file_lock); 649 return; 650 } 651 /* coupled with smp_wmb() in expand_fdtable() */ 652 smp_rmb(); 653 fdt = rcu_dereference_sched(files->fdt); 654 VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL); 655 rcu_assign_pointer(fdt->fd[fd], file); 656 rcu_read_unlock_sched(); 657 } 658 659 EXPORT_SYMBOL(fd_install); 660 661 /** 662 * file_close_fd_locked - return file associated with fd 663 * @files: file struct to retrieve file from 664 * @fd: file descriptor to retrieve file for 665 * 666 * Doesn't take a separate reference count. 667 * 668 * Context: files_lock must be held. 669 * 670 * Returns: The file associated with @fd (NULL if @fd is not open) 671 */ 672 struct file *file_close_fd_locked(struct files_struct *files, unsigned fd) 673 { 674 struct fdtable *fdt = files_fdtable(files); 675 struct file *file; 676 677 lockdep_assert_held(&files->file_lock); 678 679 if (fd >= fdt->max_fds) 680 return NULL; 681 682 fd = array_index_nospec(fd, fdt->max_fds); 683 file = rcu_dereference_raw(fdt->fd[fd]); 684 if (file) { 685 rcu_assign_pointer(fdt->fd[fd], NULL); 686 __put_unused_fd(files, fd); 687 } 688 return file; 689 } 690 691 int close_fd(unsigned fd) 692 { 693 struct files_struct *files = current->files; 694 struct file *file; 695 696 spin_lock(&files->file_lock); 697 file = file_close_fd_locked(files, fd); 698 spin_unlock(&files->file_lock); 699 if (!file) 700 return -EBADF; 701 702 return filp_close(file, files); 703 } 704 EXPORT_SYMBOL(close_fd); 705 706 /** 707 * last_fd - return last valid index into fd table 708 * @fdt: File descriptor table. 709 * 710 * Context: Either rcu read lock or files_lock must be held. 711 * 712 * Returns: Last valid index into fdtable. 713 */ 714 static inline unsigned last_fd(struct fdtable *fdt) 715 { 716 return fdt->max_fds - 1; 717 } 718 719 static inline void __range_cloexec(struct files_struct *cur_fds, 720 unsigned int fd, unsigned int max_fd) 721 { 722 struct fdtable *fdt; 723 724 /* make sure we're using the correct maximum value */ 725 spin_lock(&cur_fds->file_lock); 726 fdt = files_fdtable(cur_fds); 727 max_fd = min(last_fd(fdt), max_fd); 728 if (fd <= max_fd) 729 bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1); 730 spin_unlock(&cur_fds->file_lock); 731 } 732 733 static inline void __range_close(struct files_struct *files, unsigned int fd, 734 unsigned int max_fd) 735 { 736 struct file *file; 737 unsigned n; 738 739 spin_lock(&files->file_lock); 740 n = last_fd(files_fdtable(files)); 741 max_fd = min(max_fd, n); 742 743 for (; fd <= max_fd; fd++) { 744 file = file_close_fd_locked(files, fd); 745 if (file) { 746 spin_unlock(&files->file_lock); 747 filp_close(file, files); 748 cond_resched(); 749 spin_lock(&files->file_lock); 750 } else if (need_resched()) { 751 spin_unlock(&files->file_lock); 752 cond_resched(); 753 spin_lock(&files->file_lock); 754 } 755 } 756 spin_unlock(&files->file_lock); 757 } 758 759 /** 760 * sys_close_range() - Close all file descriptors in a given range. 761 * 762 * @fd: starting file descriptor to close 763 * @max_fd: last file descriptor to close 764 * @flags: CLOSE_RANGE flags. 765 * 766 * This closes a range of file descriptors. All file descriptors 767 * from @fd up to and including @max_fd are closed. 768 * Currently, errors to close a given file descriptor are ignored. 769 */ 770 SYSCALL_DEFINE3(close_range, unsigned int, fd, unsigned int, max_fd, 771 unsigned int, flags) 772 { 773 struct task_struct *me = current; 774 struct files_struct *cur_fds = me->files, *fds = NULL; 775 776 if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC)) 777 return -EINVAL; 778 779 if (fd > max_fd) 780 return -EINVAL; 781 782 if ((flags & CLOSE_RANGE_UNSHARE) && atomic_read(&cur_fds->count) > 1) { 783 struct fd_range range = {fd, max_fd}, *punch_hole = ⦥ 784 785 /* 786 * If the caller requested all fds to be made cloexec we always 787 * copy all of the file descriptors since they still want to 788 * use them. 789 */ 790 if (flags & CLOSE_RANGE_CLOEXEC) 791 punch_hole = NULL; 792 793 fds = dup_fd(cur_fds, punch_hole); 794 if (IS_ERR(fds)) 795 return PTR_ERR(fds); 796 /* 797 * We used to share our file descriptor table, and have now 798 * created a private one, make sure we're using it below. 799 */ 800 swap(cur_fds, fds); 801 } 802 803 if (flags & CLOSE_RANGE_CLOEXEC) 804 __range_cloexec(cur_fds, fd, max_fd); 805 else 806 __range_close(cur_fds, fd, max_fd); 807 808 if (fds) { 809 /* 810 * We're done closing the files we were supposed to. Time to install 811 * the new file descriptor table and drop the old one. 812 */ 813 task_lock(me); 814 me->files = cur_fds; 815 task_unlock(me); 816 put_files_struct(fds); 817 } 818 819 return 0; 820 } 821 822 /** 823 * file_close_fd - return file associated with fd 824 * @fd: file descriptor to retrieve file for 825 * 826 * Doesn't take a separate reference count. 827 * 828 * Returns: The file associated with @fd (NULL if @fd is not open) 829 */ 830 struct file *file_close_fd(unsigned int fd) 831 { 832 struct files_struct *files = current->files; 833 struct file *file; 834 835 spin_lock(&files->file_lock); 836 file = file_close_fd_locked(files, fd); 837 spin_unlock(&files->file_lock); 838 839 return file; 840 } 841 842 void do_close_on_exec(struct files_struct *files) 843 { 844 unsigned i; 845 struct fdtable *fdt; 846 847 /* exec unshares first */ 848 spin_lock(&files->file_lock); 849 for (i = 0; ; i++) { 850 unsigned long set; 851 unsigned fd = i * BITS_PER_LONG; 852 fdt = files_fdtable(files); 853 if (fd >= fdt->max_fds) 854 break; 855 set = fdt->close_on_exec[i]; 856 if (!set) 857 continue; 858 fdt->close_on_exec[i] = 0; 859 for ( ; set ; fd++, set >>= 1) { 860 struct file *file; 861 if (!(set & 1)) 862 continue; 863 file = fdt->fd[fd]; 864 if (!file) 865 continue; 866 rcu_assign_pointer(fdt->fd[fd], NULL); 867 __put_unused_fd(files, fd); 868 spin_unlock(&files->file_lock); 869 filp_close(file, files); 870 cond_resched(); 871 spin_lock(&files->file_lock); 872 } 873 874 } 875 spin_unlock(&files->file_lock); 876 } 877 878 static struct file *__get_file_rcu(struct file __rcu **f) 879 { 880 struct file __rcu *file; 881 struct file __rcu *file_reloaded; 882 struct file __rcu *file_reloaded_cmp; 883 884 file = rcu_dereference_raw(*f); 885 if (!file) 886 return NULL; 887 888 if (unlikely(!file_ref_get(&file->f_ref))) 889 return ERR_PTR(-EAGAIN); 890 891 file_reloaded = rcu_dereference_raw(*f); 892 893 /* 894 * Ensure that all accesses have a dependency on the load from 895 * rcu_dereference_raw() above so we get correct ordering 896 * between reuse/allocation and the pointer check below. 897 */ 898 file_reloaded_cmp = file_reloaded; 899 OPTIMIZER_HIDE_VAR(file_reloaded_cmp); 900 901 /* 902 * file_ref_get() above provided a full memory barrier when we 903 * acquired a reference. 904 * 905 * This is paired with the write barrier from assigning to the 906 * __rcu protected file pointer so that if that pointer still 907 * matches the current file, we know we have successfully 908 * acquired a reference to the right file. 909 * 910 * If the pointers don't match the file has been reallocated by 911 * SLAB_TYPESAFE_BY_RCU. 912 */ 913 if (file == file_reloaded_cmp) 914 return file_reloaded; 915 916 fput(file); 917 return ERR_PTR(-EAGAIN); 918 } 919 920 /** 921 * get_file_rcu - try go get a reference to a file under rcu 922 * @f: the file to get a reference on 923 * 924 * This function tries to get a reference on @f carefully verifying that 925 * @f hasn't been reused. 926 * 927 * This function should rarely have to be used and only by users who 928 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it. 929 * 930 * Return: Returns @f with the reference count increased or NULL. 931 */ 932 struct file *get_file_rcu(struct file __rcu **f) 933 { 934 for (;;) { 935 struct file __rcu *file; 936 937 file = __get_file_rcu(f); 938 if (!IS_ERR(file)) 939 return file; 940 } 941 } 942 EXPORT_SYMBOL_GPL(get_file_rcu); 943 944 /** 945 * get_file_active - try go get a reference to a file 946 * @f: the file to get a reference on 947 * 948 * In contast to get_file_rcu() the pointer itself isn't part of the 949 * reference counting. 950 * 951 * This function should rarely have to be used and only by users who 952 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it. 953 * 954 * Return: Returns @f with the reference count increased or NULL. 955 */ 956 struct file *get_file_active(struct file **f) 957 { 958 struct file __rcu *file; 959 960 rcu_read_lock(); 961 file = __get_file_rcu(f); 962 rcu_read_unlock(); 963 if (IS_ERR(file)) 964 file = NULL; 965 return file; 966 } 967 EXPORT_SYMBOL_GPL(get_file_active); 968 969 static inline struct file *__fget_files_rcu(struct files_struct *files, 970 unsigned int fd, fmode_t mask) 971 { 972 for (;;) { 973 struct file *file; 974 struct fdtable *fdt = rcu_dereference_raw(files->fdt); 975 struct file __rcu **fdentry; 976 unsigned long nospec_mask; 977 978 /* Mask is a 0 for invalid fd's, ~0 for valid ones */ 979 nospec_mask = array_index_mask_nospec(fd, fdt->max_fds); 980 981 /* 982 * fdentry points to the 'fd' offset, or fdt->fd[0]. 983 * Loading from fdt->fd[0] is always safe, because the 984 * array always exists. 985 */ 986 fdentry = fdt->fd + (fd & nospec_mask); 987 988 /* Do the load, then mask any invalid result */ 989 file = rcu_dereference_raw(*fdentry); 990 file = (void *)(nospec_mask & (unsigned long)file); 991 if (unlikely(!file)) 992 return NULL; 993 994 /* 995 * Ok, we have a file pointer that was valid at 996 * some point, but it might have become stale since. 997 * 998 * We need to confirm it by incrementing the refcount 999 * and then check the lookup again. 1000 * 1001 * file_ref_get() gives us a full memory barrier. We 1002 * only really need an 'acquire' one to protect the 1003 * loads below, but we don't have that. 1004 */ 1005 if (unlikely(!file_ref_get(&file->f_ref))) 1006 continue; 1007 1008 /* 1009 * Such a race can take two forms: 1010 * 1011 * (a) the file ref already went down to zero and the 1012 * file hasn't been reused yet or the file count 1013 * isn't zero but the file has already been reused. 1014 * 1015 * (b) the file table entry has changed under us. 1016 * Note that we don't need to re-check the 'fdt->fd' 1017 * pointer having changed, because it always goes 1018 * hand-in-hand with 'fdt'. 1019 * 1020 * If so, we need to put our ref and try again. 1021 */ 1022 if (unlikely(file != rcu_dereference_raw(*fdentry)) || 1023 unlikely(rcu_dereference_raw(files->fdt) != fdt)) { 1024 fput(file); 1025 continue; 1026 } 1027 1028 /* 1029 * This isn't the file we're looking for or we're not 1030 * allowed to get a reference to it. 1031 */ 1032 if (unlikely(file->f_mode & mask)) { 1033 fput(file); 1034 return NULL; 1035 } 1036 1037 /* 1038 * Ok, we have a ref to the file, and checked that it 1039 * still exists. 1040 */ 1041 return file; 1042 } 1043 } 1044 1045 static struct file *__fget_files(struct files_struct *files, unsigned int fd, 1046 fmode_t mask) 1047 { 1048 struct file *file; 1049 1050 rcu_read_lock(); 1051 file = __fget_files_rcu(files, fd, mask); 1052 rcu_read_unlock(); 1053 1054 return file; 1055 } 1056 1057 static inline struct file *__fget(unsigned int fd, fmode_t mask) 1058 { 1059 return __fget_files(current->files, fd, mask); 1060 } 1061 1062 struct file *fget(unsigned int fd) 1063 { 1064 return __fget(fd, FMODE_PATH); 1065 } 1066 EXPORT_SYMBOL(fget); 1067 1068 struct file *fget_raw(unsigned int fd) 1069 { 1070 return __fget(fd, 0); 1071 } 1072 EXPORT_SYMBOL(fget_raw); 1073 1074 struct file *fget_task(struct task_struct *task, unsigned int fd) 1075 { 1076 struct file *file = NULL; 1077 1078 task_lock(task); 1079 if (task->files) 1080 file = __fget_files(task->files, fd, 0); 1081 task_unlock(task); 1082 1083 return file; 1084 } 1085 1086 struct file *fget_task_next(struct task_struct *task, unsigned int *ret_fd) 1087 { 1088 /* Must be called with rcu_read_lock held */ 1089 struct files_struct *files; 1090 unsigned int fd = *ret_fd; 1091 struct file *file = NULL; 1092 1093 task_lock(task); 1094 files = task->files; 1095 if (files) { 1096 rcu_read_lock(); 1097 for (; fd < files_fdtable(files)->max_fds; fd++) { 1098 file = __fget_files_rcu(files, fd, 0); 1099 if (file) 1100 break; 1101 } 1102 rcu_read_unlock(); 1103 } 1104 task_unlock(task); 1105 *ret_fd = fd; 1106 return file; 1107 } 1108 EXPORT_SYMBOL(fget_task_next); 1109 1110 /* 1111 * Lightweight file lookup - no refcnt increment if fd table isn't shared. 1112 * 1113 * You can use this instead of fget if you satisfy all of the following 1114 * conditions: 1115 * 1) You must call fput_light before exiting the syscall and returning control 1116 * to userspace (i.e. you cannot remember the returned struct file * after 1117 * returning to userspace). 1118 * 2) You must not call filp_close on the returned struct file * in between 1119 * calls to fget_light and fput_light. 1120 * 3) You must not clone the current task in between the calls to fget_light 1121 * and fput_light. 1122 * 1123 * The fput_needed flag returned by fget_light should be passed to the 1124 * corresponding fput_light. 1125 * 1126 * (As an exception to rule 2, you can call filp_close between fget_light and 1127 * fput_light provided that you capture a real refcount with get_file before 1128 * the call to filp_close, and ensure that this real refcount is fput *after* 1129 * the fput_light call.) 1130 * 1131 * See also the documentation in rust/kernel/file.rs. 1132 */ 1133 static inline struct fd __fget_light(unsigned int fd, fmode_t mask) 1134 { 1135 struct files_struct *files = current->files; 1136 struct file *file; 1137 1138 /* 1139 * If another thread is concurrently calling close_fd() followed 1140 * by put_files_struct(), we must not observe the old table 1141 * entry combined with the new refcount - otherwise we could 1142 * return a file that is concurrently being freed. 1143 * 1144 * atomic_read_acquire() pairs with atomic_dec_and_test() in 1145 * put_files_struct(). 1146 */ 1147 if (likely(atomic_read_acquire(&files->count) == 1)) { 1148 file = files_lookup_fd_raw(files, fd); 1149 if (!file || unlikely(file->f_mode & mask)) 1150 return EMPTY_FD; 1151 return BORROWED_FD(file); 1152 } else { 1153 file = __fget_files(files, fd, mask); 1154 if (!file) 1155 return EMPTY_FD; 1156 return CLONED_FD(file); 1157 } 1158 } 1159 struct fd fdget(unsigned int fd) 1160 { 1161 return __fget_light(fd, FMODE_PATH); 1162 } 1163 EXPORT_SYMBOL(fdget); 1164 1165 struct fd fdget_raw(unsigned int fd) 1166 { 1167 return __fget_light(fd, 0); 1168 } 1169 1170 /* 1171 * Try to avoid f_pos locking. We only need it if the 1172 * file is marked for FMODE_ATOMIC_POS, and it can be 1173 * accessed multiple ways. 1174 * 1175 * Always do it for directories, because pidfd_getfd() 1176 * can make a file accessible even if it otherwise would 1177 * not be, and for directories this is a correctness 1178 * issue, not a "POSIX requirement". 1179 */ 1180 static inline bool file_needs_f_pos_lock(struct file *file) 1181 { 1182 return (file->f_mode & FMODE_ATOMIC_POS) && 1183 (file_count(file) > 1 || file->f_op->iterate_shared); 1184 } 1185 1186 bool file_seek_cur_needs_f_lock(struct file *file) 1187 { 1188 if (!(file->f_mode & FMODE_ATOMIC_POS) && !file->f_op->iterate_shared) 1189 return false; 1190 1191 VFS_WARN_ON_ONCE((file_count(file) > 1) && 1192 !mutex_is_locked(&file->f_pos_lock)); 1193 return true; 1194 } 1195 1196 struct fd fdget_pos(unsigned int fd) 1197 { 1198 struct fd f = fdget(fd); 1199 struct file *file = fd_file(f); 1200 1201 if (file && file_needs_f_pos_lock(file)) { 1202 f.word |= FDPUT_POS_UNLOCK; 1203 mutex_lock(&file->f_pos_lock); 1204 } 1205 return f; 1206 } 1207 1208 void __f_unlock_pos(struct file *f) 1209 { 1210 mutex_unlock(&f->f_pos_lock); 1211 } 1212 1213 /* 1214 * We only lock f_pos if we have threads or if the file might be 1215 * shared with another process. In both cases we'll have an elevated 1216 * file count (done either by fdget() or by fork()). 1217 */ 1218 1219 void set_close_on_exec(unsigned int fd, int flag) 1220 { 1221 struct files_struct *files = current->files; 1222 spin_lock(&files->file_lock); 1223 __set_close_on_exec(fd, files_fdtable(files), flag); 1224 spin_unlock(&files->file_lock); 1225 } 1226 1227 bool get_close_on_exec(unsigned int fd) 1228 { 1229 bool res; 1230 rcu_read_lock(); 1231 res = close_on_exec(fd, current->files); 1232 rcu_read_unlock(); 1233 return res; 1234 } 1235 1236 static int do_dup2(struct files_struct *files, 1237 struct file *file, unsigned fd, unsigned flags) 1238 __releases(&files->file_lock) 1239 { 1240 struct file *tofree; 1241 struct fdtable *fdt; 1242 1243 /* 1244 * dup2() is expected to close the file installed in the target fd slot 1245 * (if any). However, userspace hand-picking a fd may be racing against 1246 * its own threads which happened to allocate it in open() et al but did 1247 * not populate it yet. 1248 * 1249 * Broadly speaking we may be racing against the following: 1250 * fd = get_unused_fd_flags(); // fd slot reserved, ->fd[fd] == NULL 1251 * file = hard_work_goes_here(); 1252 * fd_install(fd, file); // only now ->fd[fd] == file 1253 * 1254 * It is an invariant that a successfully allocated fd has a NULL entry 1255 * in the array until the matching fd_install(). 1256 * 1257 * If we fit the window, we have the fd to populate, yet no target file 1258 * to close. Trying to ignore it and install our new file would violate 1259 * the invariant and make fd_install() overwrite our file. 1260 * 1261 * Things can be done(tm) to handle this. However, the issue does not 1262 * concern legitimate programs and we only need to make sure the kernel 1263 * does not trip over it. 1264 * 1265 * The simplest way out is to return an error if we find ourselves here. 1266 * 1267 * POSIX is silent on the issue, we return -EBUSY. 1268 */ 1269 fdt = files_fdtable(files); 1270 fd = array_index_nospec(fd, fdt->max_fds); 1271 tofree = rcu_dereference_raw(fdt->fd[fd]); 1272 if (!tofree && fd_is_open(fd, fdt)) 1273 goto Ebusy; 1274 get_file(file); 1275 rcu_assign_pointer(fdt->fd[fd], file); 1276 __set_open_fd(fd, fdt, flags & O_CLOEXEC); 1277 spin_unlock(&files->file_lock); 1278 1279 if (tofree) 1280 filp_close(tofree, files); 1281 1282 return fd; 1283 1284 Ebusy: 1285 spin_unlock(&files->file_lock); 1286 return -EBUSY; 1287 } 1288 1289 int replace_fd(unsigned fd, struct file *file, unsigned flags) 1290 { 1291 int err; 1292 struct files_struct *files = current->files; 1293 1294 if (!file) 1295 return close_fd(fd); 1296 1297 if (fd >= rlimit(RLIMIT_NOFILE)) 1298 return -EBADF; 1299 1300 spin_lock(&files->file_lock); 1301 err = expand_files(files, fd); 1302 if (unlikely(err < 0)) 1303 goto out_unlock; 1304 return do_dup2(files, file, fd, flags); 1305 1306 out_unlock: 1307 spin_unlock(&files->file_lock); 1308 return err; 1309 } 1310 1311 /** 1312 * receive_fd() - Install received file into file descriptor table 1313 * @file: struct file that was received from another process 1314 * @ufd: __user pointer to write new fd number to 1315 * @o_flags: the O_* flags to apply to the new fd entry 1316 * 1317 * Installs a received file into the file descriptor table, with appropriate 1318 * checks and count updates. Optionally writes the fd number to userspace, if 1319 * @ufd is non-NULL. 1320 * 1321 * This helper handles its own reference counting of the incoming 1322 * struct file. 1323 * 1324 * Returns newly install fd or -ve on error. 1325 */ 1326 int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags) 1327 { 1328 int new_fd; 1329 int error; 1330 1331 error = security_file_receive(file); 1332 if (error) 1333 return error; 1334 1335 new_fd = get_unused_fd_flags(o_flags); 1336 if (new_fd < 0) 1337 return new_fd; 1338 1339 if (ufd) { 1340 error = put_user(new_fd, ufd); 1341 if (error) { 1342 put_unused_fd(new_fd); 1343 return error; 1344 } 1345 } 1346 1347 fd_install(new_fd, get_file(file)); 1348 __receive_sock(file); 1349 return new_fd; 1350 } 1351 EXPORT_SYMBOL_GPL(receive_fd); 1352 1353 int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags) 1354 { 1355 int error; 1356 1357 error = security_file_receive(file); 1358 if (error) 1359 return error; 1360 error = replace_fd(new_fd, file, o_flags); 1361 if (error) 1362 return error; 1363 __receive_sock(file); 1364 return new_fd; 1365 } 1366 1367 static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags) 1368 { 1369 int err = -EBADF; 1370 struct file *file; 1371 struct files_struct *files = current->files; 1372 1373 if ((flags & ~O_CLOEXEC) != 0) 1374 return -EINVAL; 1375 1376 if (unlikely(oldfd == newfd)) 1377 return -EINVAL; 1378 1379 if (newfd >= rlimit(RLIMIT_NOFILE)) 1380 return -EBADF; 1381 1382 spin_lock(&files->file_lock); 1383 err = expand_files(files, newfd); 1384 file = files_lookup_fd_locked(files, oldfd); 1385 if (unlikely(!file)) 1386 goto Ebadf; 1387 if (unlikely(err < 0)) { 1388 if (err == -EMFILE) 1389 goto Ebadf; 1390 goto out_unlock; 1391 } 1392 return do_dup2(files, file, newfd, flags); 1393 1394 Ebadf: 1395 err = -EBADF; 1396 out_unlock: 1397 spin_unlock(&files->file_lock); 1398 return err; 1399 } 1400 1401 SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags) 1402 { 1403 return ksys_dup3(oldfd, newfd, flags); 1404 } 1405 1406 SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd) 1407 { 1408 if (unlikely(newfd == oldfd)) { /* corner case */ 1409 struct files_struct *files = current->files; 1410 struct file *f; 1411 int retval = oldfd; 1412 1413 rcu_read_lock(); 1414 f = __fget_files_rcu(files, oldfd, 0); 1415 if (!f) 1416 retval = -EBADF; 1417 rcu_read_unlock(); 1418 if (f) 1419 fput(f); 1420 return retval; 1421 } 1422 return ksys_dup3(oldfd, newfd, 0); 1423 } 1424 1425 SYSCALL_DEFINE1(dup, unsigned int, fildes) 1426 { 1427 int ret = -EBADF; 1428 struct file *file = fget_raw(fildes); 1429 1430 if (file) { 1431 ret = get_unused_fd_flags(0); 1432 if (ret >= 0) 1433 fd_install(ret, file); 1434 else 1435 fput(file); 1436 } 1437 return ret; 1438 } 1439 1440 int f_dupfd(unsigned int from, struct file *file, unsigned flags) 1441 { 1442 unsigned long nofile = rlimit(RLIMIT_NOFILE); 1443 int err; 1444 if (from >= nofile) 1445 return -EINVAL; 1446 err = alloc_fd(from, nofile, flags); 1447 if (err >= 0) { 1448 get_file(file); 1449 fd_install(err, file); 1450 } 1451 return err; 1452 } 1453 1454 int iterate_fd(struct files_struct *files, unsigned n, 1455 int (*f)(const void *, struct file *, unsigned), 1456 const void *p) 1457 { 1458 struct fdtable *fdt; 1459 int res = 0; 1460 if (!files) 1461 return 0; 1462 spin_lock(&files->file_lock); 1463 for (fdt = files_fdtable(files); n < fdt->max_fds; n++) { 1464 struct file *file; 1465 file = rcu_dereference_check_fdtable(files, fdt->fd[n]); 1466 if (!file) 1467 continue; 1468 res = f(p, file, n); 1469 if (res) 1470 break; 1471 } 1472 spin_unlock(&files->file_lock); 1473 return res; 1474 } 1475 EXPORT_SYMBOL(iterate_fd); 1476