fs/file.c - lines matching "no", "ref", "current", "limit"
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
29 static noinline bool __file_ref_put_badval(file_ref_t *ref, unsigned long cnt) in __file_ref_put_badval() argument
37 atomic_long_set(&ref->refcnt, FILE_REF_DEAD); in __file_ref_put_badval()
47 atomic_long_set(&ref->refcnt, FILE_REF_SATURATED); in __file_ref_put_badval()
52 * __file_ref_put - Slowpath of file_ref_put()
53 * @ref: Pointer to the reference count
54 * @cnt: Current reference count
59 * True if this was the last reference with no future references
68 bool __file_ref_put(file_ref_t *ref, unsigned long cnt) in __file_ref_put() argument
81 if (!atomic_long_try_cmpxchg_release(&ref->refcnt, &cnt, FILE_REF_DEAD)) in __file_ref_put()
92 return __file_ref_put_badval(ref, cnt); in __file_ref_put()
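
The slowpath above implements a saturating reference count: zero means "one reference held", the final put drives the counter negative, and any value outside the valid range is pinned so a leaked object is preferred over a use-after-free. A minimal user-space model of that scheme, with assumed sentinel values standing in for the kernel's FILE_REF_DEAD/FILE_REF_SATURATED and none of its memory-ordering subtleties:

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

#define REF_DEAD      LONG_MIN         /* assumed stand-in for FILE_REF_DEAD */
#define REF_SATURATED (LONG_MIN + 1)   /* assumed stand-in for FILE_REF_SATURATED */

/* Returns true only on the final put; a counter that over- or
 * underflowed is parked at REF_SATURATED and never freed. */
static bool ref_put(atomic_long *ref)
{
	long cnt = atomic_fetch_sub(ref, 1) - 1;

	if (cnt >= 0)
		return false;                /* other references remain */
	if (cnt == -1) {
		atomic_store(ref, REF_DEAD); /* poison: later gets must fail */
		return true;
	}
	atomic_store(ref, REF_SATURATED);    /* bad value: saturate, never free */
	return false;
}
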
98 /* our min() is unusable in constant expressions ;-/ */
101 __const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
105 kvfree(fdt->fd); in __free_fdtable()
106 kvfree(fdt->open_fds); in __free_fdtable()
118 #define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds
129 bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds, in copy_fd_bitmaps()
131 bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec, in copy_fd_bitmaps()
133 bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits, in copy_fd_bitmaps()
145 BUG_ON(nfdt->max_fds < ofdt->max_fds); in copy_fdtable()
147 cpy = ofdt->max_fds * sizeof(struct file *); in copy_fdtable()
148 set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *); in copy_fdtable()
149 memcpy(nfdt->fd, ofdt->fd, cpy); in copy_fdtable()
150 memset((char *)nfdt->fd + cpy, 0, set); in copy_fdtable()
172 * the fdarray into comfortable page-tuned chunks: starting at 1024B in alloc_fdtable()
174 * with slots_wanted > BITS_PER_LONG (embedded instance in files->fdtab in alloc_fdtable()
178 * 2. on 32bit skip 64 and 128 - the minimal capacity we want there is in alloc_fdtable()
191 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise in alloc_fdtable()
197 return ERR_PTR(-EMFILE); in alloc_fdtable()
206 * a process tries to use a file descriptor near that limit. For example, in alloc_fdtable()
207 * if sysctl_nr_open is set to 1073741816 (0x3ffffff8) - which is what in alloc_fdtable()
208 * systemd typically sets it to - then trying to use a file descriptor in alloc_fdtable()
213 return ERR_PTR(-EMFILE); in alloc_fdtable()
218 fdt->max_fds = nr; in alloc_fdtable()
222 fdt->fd = data; in alloc_fdtable()
229 fdt->open_fds = data; in alloc_fdtable()
231 fdt->close_on_exec = data; in alloc_fdtable()
233 fdt->full_fds_bits = data; in alloc_fdtable()
238 kvfree(fdt->fd); in alloc_fdtable()
242 return ERR_PTR(-ENOMEM); in alloc_fdtable()
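
The sizing policy those comments describe - grow in page-tuned power-of-two chunks, refuse requests beyond the nr_open limit, and keep the slot count a multiple of BITS_PER_LONG so the bitmaps stay whole words - can be sketched in self-contained C. The 256-slot floor is an assumption for the sketch; the clamp expression is the same rounding idiom the kernel uses:

#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

/* Returns the capacity to allocate for slots_wanted fds, or 0 when the
 * request exceeds the limit (which a caller would map to -EMFILE). */
static unsigned long fdtable_capacity(unsigned long slots_wanted,
				      unsigned long nr_open)
{
	unsigned long nr = 256;              /* assumed starting chunk */

	if (slots_wanted > nr_open)
		return 0;

	while (nr < slots_wanted)            /* page-tuned doubling */
		nr *= 2;

	/* clamp to the limit, rounding up to a BITS_PER_LONG multiple */
	if (nr > nr_open)
		nr = ((nr_open - 1) | (BITS_PER_LONG - 1)) + 1;
	return nr;
}
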
250 * The files->file_lock should be held on entry, and will be held on exit.
253 __releases(files->file_lock) in expand_fdtable()
254 __acquires(files->file_lock) in expand_fdtable()
258 spin_unlock(&files->file_lock); in expand_fdtable()
264 if (atomic_read(&files->count) > 1) in expand_fdtable()
267 spin_lock(&files->file_lock); in expand_fdtable()
271 BUG_ON(nr < cur_fdt->max_fds); in expand_fdtable()
273 rcu_assign_pointer(files->fdt, new_fdt); in expand_fdtable()
274 if (cur_fdt != &files->fdtab) in expand_fdtable()
275 call_rcu(&cur_fdt->rcu, free_fdtable_rcu); in expand_fdtable()
284 * the current capacity and there is room for expansion.
286 * The files->file_lock should be held on entry, and will be held on exit.
289 __releases(files->file_lock) in expand_files()
290 __acquires(files->file_lock) in expand_files()
299 if (nr < fdt->max_fds) in expand_files()
302 if (unlikely(files->resize_in_progress)) { in expand_files()
303 spin_unlock(&files->file_lock); in expand_files()
304 wait_event(files->resize_wait, !files->resize_in_progress); in expand_files()
305 spin_lock(&files->file_lock); in expand_files()
311 return -EMFILE; in expand_files()
314 files->resize_in_progress = true; in expand_files()
316 files->resize_in_progress = false; in expand_files()
318 wake_up_all(&files->resize_wait); in expand_files()
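
The resize_in_progress/resize_wait pair is a single-resizer handshake: whoever wins the flag drops file_lock to do the allocation, everyone else sleeps until woken. A user-space model with a pthread condition variable standing in for wait_event()/wake_up_all() (a sketch of the pattern, not the kernel's primitives):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t file_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t resize_wait = PTHREAD_COND_INITIALIZER;
static bool resize_in_progress;

static void expand(void)
{
	pthread_mutex_lock(&file_lock);
	while (resize_in_progress)            /* someone else is resizing */
		pthread_cond_wait(&resize_wait, &file_lock);
	resize_in_progress = true;
	pthread_mutex_unlock(&file_lock);

	/* ... allocate and publish the larger table without the lock ... */

	pthread_mutex_lock(&file_lock);
	resize_in_progress = false;
	pthread_cond_broadcast(&resize_wait); /* kernel: wake_up_all() */
	pthread_mutex_unlock(&file_lock);
}
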
326 __set_bit(fd, fdt->close_on_exec); in __set_close_on_exec()
328 if (test_bit(fd, fdt->close_on_exec)) in __set_close_on_exec()
329 __clear_bit(fd, fdt->close_on_exec); in __set_close_on_exec()
335 __set_bit(fd, fdt->open_fds); in __set_open_fd()
338 if (!~fdt->open_fds[fd]) in __set_open_fd()
339 __set_bit(fd, fdt->full_fds_bits); in __set_open_fd()
344 __clear_bit(fd, fdt->open_fds); in __clear_open_fd()
346 if (test_bit(fd, fdt->full_fds_bits)) in __clear_open_fd()
347 __clear_bit(fd, fdt->full_fds_bits); in __clear_open_fd()
352 return test_bit(fd, fdt->open_fds); in fd_is_open()
359 * punch_hole is optional - when close_range() is asked to unshare
366 unsigned int last = find_last_bit(fdt->open_fds, fdt->max_fds); in sane_fdtable_size()
368 if (last == fdt->max_fds) in sane_fdtable_size()
370 if (punch_hole && punch_hole->to >= last && punch_hole->from <= last) { in sane_fdtable_size()
371 last = find_last_bit(fdt->open_fds, punch_hole->from); in sane_fdtable_size()
372 if (last == punch_hole->from) in sane_fdtable_size()
392 return ERR_PTR(-ENOMEM); in dup_fd()
394 atomic_set(&newf->count, 1); in dup_fd()
396 spin_lock_init(&newf->file_lock); in dup_fd()
397 newf->resize_in_progress = false; in dup_fd()
398 init_waitqueue_head(&newf->resize_wait); in dup_fd()
399 newf->next_fd = 0; in dup_fd()
400 new_fdt = &newf->fdtab; in dup_fd()
401 new_fdt->max_fds = NR_OPEN_DEFAULT; in dup_fd()
402 new_fdt->close_on_exec = newf->close_on_exec_init; in dup_fd()
403 new_fdt->open_fds = newf->open_fds_init; in dup_fd()
404 new_fdt->full_fds_bits = newf->full_fds_bits_init; in dup_fd()
405 new_fdt->fd = &newf->fd_array[0]; in dup_fd()
407 spin_lock(&oldf->file_lock); in dup_fd()
414 while (unlikely(open_files > new_fdt->max_fds)) { in dup_fd()
415 spin_unlock(&oldf->file_lock); in dup_fd()
417 if (new_fdt != &newf->fdtab) in dup_fd()
431 spin_lock(&oldf->file_lock); in dup_fd()
438 old_fds = old_fdt->fd; in dup_fd()
439 new_fds = new_fdt->fd; in dup_fd()
443 * files_struct, despite holding ->file_lock. in dup_fd()
449 * At the same time we know no files will disappear as all other in dup_fd()
453 * ref the file if we see it and mark the fd slot as unused otherwise. in dup_fd()
455 for (i = open_files; i != 0; i--) { in dup_fd()
460 __clear_open_fd(open_files - i, new_fdt); in dup_fd()
464 spin_unlock(&oldf->file_lock); in dup_fd()
467 memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *)); in dup_fd()
469 rcu_assign_pointer(newf->fdt, new_fdt); in dup_fd()
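
dup_fd() is the copy engine behind fork() and unshare(CLONE_FILES); the latter is the direct way to request a private descriptor table from user space:

/* After unshare(CLONE_FILES) this task owns a private copy of its fd
 * table (built by dup_fd()), so fds it opens afterwards are invisible
 * to any task that still shares the old table. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	if (unshare(CLONE_FILES) == -1) {
		perror("unshare");
		return 1;
	}
	printf("fd table is now private to pid %d\n", getpid());
	return 0;
}
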
478 * ->file_lock because this is the last reference to the in close_files()
481 struct fdtable *fdt = rcu_dereference_raw(files->fdt); in close_files()
487 if (i >= fdt->max_fds) in close_files()
489 set = fdt->open_fds[j++]; in close_files()
492 struct file *file = fdt->fd[i]; in close_files()
508 if (atomic_dec_and_test(&files->count)) { in put_files_struct()
512 if (fdt != &files->fdtab) in put_files_struct()
520 struct files_struct * files = tsk->files; in exit_files()
524 tsk->files = NULL; in exit_files()
546 unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */ in find_next_fd()
554 bit = find_next_zero_bit(&fdt->open_fds[bitbit], BITS_PER_LONG, in find_next_fd()
555 start & (BITS_PER_LONG - 1)); in find_next_fd()
559 bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG; in find_next_fd()
564 return find_next_zero_bit(fdt->open_fds, maxfd, start); in find_next_fd()
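
find_next_fd() is a two-level search: full_fds_bits keeps one summary bit per word of open_fds, set only when that word is completely full, so the scan skips BITS_PER_LONG descriptors at a time. The same idea in self-contained C, with word-at-a-time loops instead of the kernel's bitmap helpers (a sketch; the caller sizes both bitmaps):

#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static unsigned long first_free_fd(const unsigned long *open_fds,
				   const unsigned long *full_fds_bits,
				   unsigned long max_fds)
{
	unsigned long w, b, nwords = max_fds / BITS_PER_LONG;

	for (w = 0; w < nwords; w++) {
		/* level 2: skip words already known to be full */
		if (full_fds_bits[w / BITS_PER_LONG] & (1UL << (w % BITS_PER_LONG)))
			continue;
		if (open_fds[w] == ~0UL)     /* full, summary bit not set yet */
			continue;
		/* level 1: find the zero bit inside the word */
		for (b = 0; b < BITS_PER_LONG; b++)
			if (!(open_fds[w] & (1UL << b)))
				return w * BITS_PER_LONG + b;
	}
	return max_fds;                      /* table is full */
}
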
572 struct files_struct *files = current->files; in alloc_fd()
577 spin_lock(&files->file_lock); in alloc_fd()
581 if (fd < files->next_fd) in alloc_fd()
582 fd = files->next_fd; in alloc_fd()
584 if (likely(fd < fdt->max_fds)) in alloc_fd()
589 * will limit the total number of files that can be opened. in alloc_fd()
591 error = -EMFILE; in alloc_fd()
595 if (unlikely(fd >= fdt->max_fds)) { in alloc_fd()
603 if (start <= files->next_fd) in alloc_fd()
604 files->next_fd = fd + 1; in alloc_fd()
608 VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL); in alloc_fd()
611 spin_unlock(&files->file_lock); in alloc_fd()
630 if (fd < files->next_fd) in __put_unused_fd()
631 files->next_fd = fd; in __put_unused_fd()
636 struct files_struct *files = current->files; in put_unused_fd()
637 spin_lock(&files->file_lock); in put_unused_fd()
639 spin_unlock(&files->file_lock); in put_unused_fd()
645 * fd_install - install a file pointer in the fd array
654 struct files_struct *files = current->files; in fd_install()
657 if (WARN_ON_ONCE(unlikely(file->f_mode & FMODE_BACKING))) in fd_install()
662 if (unlikely(files->resize_in_progress)) { in fd_install()
664 spin_lock(&files->file_lock); in fd_install()
666 VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL); in fd_install()
667 rcu_assign_pointer(fdt->fd[fd], file); in fd_install()
668 spin_unlock(&files->file_lock); in fd_install()
673 fdt = rcu_dereference_sched(files->fdt); in fd_install()
674 VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL); in fd_install()
675 rcu_assign_pointer(fdt->fd[fd], file); in fd_install()
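
The ordering fd_install() depends on is the reserve-then-publish pattern: take a slot with get_unused_fd_flags(), build the file, and only then make it visible through the table. A kernel-side sketch (my_fops is a placeholder; anon_inode_getfile() is just one convenient way to produce a file here):

#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/file.h>

static const struct file_operations my_fops;   /* placeholder fops */

static int my_create_fd(void *priv)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);   /* slot reserved, ->fd[fd] == NULL */
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("[my-file]", &my_fops, priv, O_RDWR);
	if (IS_ERR(file)) {
		put_unused_fd(fd);             /* hand the slot back */
		return PTR_ERR(file);
	}

	fd_install(fd, file);                  /* only now ->fd[fd] == file */
	return fd;
}
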
682 * file_close_fd_locked - return file associated with fd
697 lockdep_assert_held(&files->file_lock); in file_close_fd_locked()
699 if (fd >= fdt->max_fds) in file_close_fd_locked()
702 fd = array_index_nospec(fd, fdt->max_fds); in file_close_fd_locked()
703 file = rcu_dereference_raw(fdt->fd[fd]); in file_close_fd_locked()
705 rcu_assign_pointer(fdt->fd[fd], NULL); in file_close_fd_locked()
713 struct files_struct *files = current->files; in close_fd()
716 spin_lock(&files->file_lock); in close_fd()
718 spin_unlock(&files->file_lock); in close_fd()
720 return -EBADF; in close_fd()
727 * last_fd - return last valid index into fd table
736 return fdt->max_fds - 1; in last_fd()
745 spin_lock(&cur_fds->file_lock); in __range_cloexec()
749 bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1); in __range_cloexec()
750 spin_unlock(&cur_fds->file_lock); in __range_cloexec()
759 spin_lock(&files->file_lock); in __range_close()
766 spin_unlock(&files->file_lock); in __range_close()
769 spin_lock(&files->file_lock); in __range_close()
771 spin_unlock(&files->file_lock); in __range_close()
773 spin_lock(&files->file_lock); in __range_close()
776 spin_unlock(&files->file_lock); in __range_close()
780 * sys_close_range() - Close all file descriptors in a given range.
793 struct task_struct *me = current; in SYSCALL_DEFINE3()
794 struct files_struct *cur_fds = me->files, *fds = NULL; in SYSCALL_DEFINE3()
797 return -EINVAL; in SYSCALL_DEFINE3()
800 return -EINVAL; in SYSCALL_DEFINE3()
802 if ((flags & CLOSE_RANGE_UNSHARE) && atomic_read(&cur_fds->count) > 1) { in SYSCALL_DEFINE3()
834 me->files = cur_fds; in SYSCALL_DEFINE3()
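
From user space this syscall is reached through the close_range(2) wrapper (glibc 2.34 and later). Closing, or merely flagging close-on-exec, every inherited descriptor above stderr looks like:

#define _GNU_SOURCE
#include <unistd.h>
#include <linux/close_range.h>  /* CLOSE_RANGE_* flag definitions */

static void drop_inherited_fds(void)
{
	/* close fds 3..UINT_MAX outright ... */
	close_range(3, ~0U, 0);
	/* ... or, cheaper before an exec: just mark them close-on-exec */
	close_range(3, ~0U, CLOSE_RANGE_CLOEXEC);
}
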
843 * file_close_fd - return file associated with fd
852 struct files_struct *files = current->files; in file_close_fd()
855 spin_lock(&files->file_lock); in file_close_fd()
857 spin_unlock(&files->file_lock); in file_close_fd()
868 spin_lock(&files->file_lock); in do_close_on_exec()
873 if (fd >= fdt->max_fds) in do_close_on_exec()
875 set = fdt->close_on_exec[i]; in do_close_on_exec()
878 fdt->close_on_exec[i] = 0; in do_close_on_exec()
883 file = fdt->fd[fd]; in do_close_on_exec()
886 rcu_assign_pointer(fdt->fd[fd], NULL); in do_close_on_exec()
888 spin_unlock(&files->file_lock); in do_close_on_exec()
891 spin_lock(&files->file_lock); in do_close_on_exec()
895 spin_unlock(&files->file_lock); in do_close_on_exec()
908 if (unlikely(!file_ref_get(&file->f_ref))) in __get_file_rcu()
909 return ERR_PTR(-EAGAIN); in __get_file_rcu()
927 * matches the current file, we know we have successfully in __get_file_rcu()
937 return ERR_PTR(-EAGAIN); in __get_file_rcu()
941 * get_file_rcu - try to get a reference to a file under rcu
965 * get_file_active - try to get a reference to a file
994 struct fdtable *fdt = rcu_dereference_raw(files->fdt); in __fget_files_rcu()
999 nospec_mask = array_index_mask_nospec(fd, fdt->max_fds); in __fget_files_rcu()
1002 * fdentry points to the 'fd' offset, or fdt->fd[0]. in __fget_files_rcu()
1003 * Loading from fdt->fd[0] is always safe, because the in __fget_files_rcu()
1006 fdentry = fdt->fd + (fd & nospec_mask); in __fget_files_rcu()
1025 if (unlikely(!file_ref_get(&file->f_ref))) in __fget_files_rcu()
1031 * (a) the file ref already went down to zero and the in __fget_files_rcu()
1036 * Note that we don't need to re-check the 'fdt->fd' in __fget_files_rcu()
1038 * hand-in-hand with 'fdt'. in __fget_files_rcu()
1040 * If so, we need to put our ref and try again. in __fget_files_rcu()
1043 unlikely(rcu_dereference_raw(files->fdt) != fdt)) { in __fget_files_rcu()
1052 if (unlikely(file->f_mode & mask)) { in __fget_files_rcu()
1058 * Ok, we have a ref to the file, and checked that it in __fget_files_rcu()
1079 return __fget_files(current->files, fd, mask); in __fget()
1099 if (task->files) in fget_task()
1100 file = __fget_files(task->files, fd, 0); in fget_task()
1114 files = task->files; in fget_task_next()
1117 for (; fd < files_fdtable(files)->max_fds; fd++) { in fget_task_next()
1131 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
1140 * 3) You must not clone the current task in between the calls to fget_light
1155 struct files_struct *files = current->files; in __fget_light()
1161 * entry combined with the new refcount - otherwise we could in __fget_light()
1167 if (likely(atomic_read_acquire(&files->count) == 1)) { in __fget_light()
1169 if (!file || unlikely(file->f_mode & mask)) in __fget_light()
1202 if (!(file->f_mode & FMODE_ATOMIC_POS)) in file_needs_f_pos_lock()
1204 if (__file_ref_read_raw(&file->f_ref) != FILE_REF_ONEREF) in file_needs_f_pos_lock()
1206 if (file->f_op->iterate_shared) in file_needs_f_pos_lock()
1213 if (!(file->f_mode & FMODE_ATOMIC_POS) && !file->f_op->iterate_shared) in file_seek_cur_needs_f_lock()
1232 mutex_lock(&file->f_pos_lock); in fdget_pos()
1239 mutex_unlock(&f->f_pos_lock); in __f_unlock_pos()
1250 struct files_struct *files = current->files; in set_close_on_exec()
1251 spin_lock(&files->file_lock); in set_close_on_exec()
1253 spin_unlock(&files->file_lock); in set_close_on_exec()
1260 res = close_on_exec(fd, current->files); in get_close_on_exec()
1267 __releases(&files->file_lock) in do_dup2()
1274 * (if any). However, userspace hand-picking a fd may be racing against in do_dup2()
1279 * fd = get_unused_fd_flags(); // fd slot reserved, ->fd[fd] == NULL in do_dup2()
1281 * fd_install(fd, file); // only now ->fd[fd] == file in do_dup2()
1286 * If we fit the window, we have the fd to populate, yet no target file in do_dup2()
1296 * POSIX is silent on the issue, we return -EBUSY. in do_dup2()
1299 fd = array_index_nospec(fd, fdt->max_fds); in do_dup2()
1300 tofree = rcu_dereference_raw(fdt->fd[fd]); in do_dup2()
1304 rcu_assign_pointer(fdt->fd[fd], file); in do_dup2()
1306 spin_unlock(&files->file_lock); in do_dup2()
1314 spin_unlock(&files->file_lock); in do_dup2()
1315 return -EBUSY; in do_dup2()
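
That window is why dup2(2) on Linux can transiently fail with EBUSY even though POSIX never mentions it; user-space code that hand-picks descriptor numbers typically just retries, as in this sketch:

#include <errno.h>
#include <unistd.h>

static int dup2_retry(int oldfd, int newfd)
{
	int ret;

	do {
		ret = dup2(oldfd, newfd);
	} while (ret == -1 && (errno == EBUSY || errno == EINTR));
	return ret;
}
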
1321 struct files_struct *files = current->files; in replace_fd()
1327 return -EBADF; in replace_fd()
1329 spin_lock(&files->file_lock); in replace_fd()
1339 spin_unlock(&files->file_lock); in replace_fd()
1344 * receive_fd() - Install received file into file descriptor table
1351 * @ufd is non-NULL.
1356 * Returns the newly installed fd or -ve on error.
1401 int err = -EBADF; in ksys_dup3()
1403 struct files_struct *files = current->files; in ksys_dup3()
1406 return -EINVAL; in ksys_dup3()
1409 return -EINVAL; in ksys_dup3()
1412 return -EBADF; in ksys_dup3()
1414 spin_lock(&files->file_lock); in ksys_dup3()
1420 if (err == -EMFILE) in ksys_dup3()
1427 err = -EBADF; in ksys_dup3()
1429 spin_unlock(&files->file_lock); in ksys_dup3()
1441 struct files_struct *files = current->files; in SYSCALL_DEFINE2()
1448 retval = -EBADF; in SYSCALL_DEFINE2()
1459 int ret = -EBADF; in SYSCALL_DEFINE1()
1477 return -EINVAL; in f_dupfd()
1494 spin_lock(&files->file_lock); in iterate_fd()
1495 for (fdt = files_fdtable(files); n < fdt->max_fds; n++) { in iterate_fd()
1497 file = rcu_dereference_check_fdtable(files, fdt->fd[n]); in iterate_fd()
1504 spin_unlock(&files->file_lock); in iterate_fd()
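
iterate_fd() walks the open files under file_lock, calling the callback for each installed file until one returns nonzero, and returns that value. A hedged kernel-side sketch of a caller (match_inode() and its inode comparison are illustrative, not an existing helper):

#include <linux/fdtable.h>
#include <linux/fs.h>

/* nonzero return stops the walk and becomes iterate_fd()'s result */
static int match_inode(const void *ctx, struct file *file, unsigned int fd)
{
	return file_inode(file) == (const struct inode *)ctx;
}

static bool task_has_inode_open(struct files_struct *files, struct inode *inode)
{
	return iterate_fd(files, 0, match_inode, inode) != 0;
}
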