1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/file.c
4  *
5  *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
6  *
7  *  Manage the dynamic fd arrays in the process files_struct.
8  */
9 
10 #include <linux/syscalls.h>
11 #include <linux/export.h>
12 #include <linux/fs.h>
13 #include <linux/kernel.h>
14 #include <linux/mm.h>
15 #include <linux/sched/signal.h>
16 #include <linux/slab.h>
17 #include <linux/file.h>
18 #include <linux/fdtable.h>
19 #include <linux/bitops.h>
20 #include <linux/spinlock.h>
21 #include <linux/rcupdate.h>
22 #include <linux/close_range.h>
23 #include <linux/file_ref.h>
24 #include <net/sock.h>
25 
26 #include "internal.h"
27 
28 /**
29  * __file_ref_put - Slowpath of file_ref_put()
30  * @ref:	Pointer to the reference count
31  * @cnt:	Current reference count
32  *
33  * Invoked when the reference count is outside of the valid zone.
34  *
35  * Return:
36  *	True if this was the last reference with no future references
37  *	possible. This signals the caller that it can safely schedule the
38  *	object, which is protected by the reference counter, for
39  *	deconstruction.
40  *
41  *	False if there are still active references or the put() raced
42  *	with a concurrent get()/put() pair. Caller is not allowed to
43  *	deconstruct the protected object.
44  */
45 bool __file_ref_put(file_ref_t *ref, unsigned long cnt)
46 {
47 	/* Did this drop the last reference? */
48 	if (likely(cnt == FILE_REF_NOREF)) {
49 		/*
50 		 * Carefully try to set the reference count to FILE_REF_DEAD.
51 		 *
52 		 * This can fail if a concurrent get() operation has
53 		 * elevated it again or the corresponding put() even marked
54 		 * it dead already. Both are valid situations and do not
55 		 * require a retry. If this fails the caller is not
56 		 * allowed to deconstruct the object.
57 		 */
58 		if (!atomic_long_try_cmpxchg_release(&ref->refcnt, &cnt, FILE_REF_DEAD))
59 			return false;
60 
61 		/*
62 		 * The caller can safely schedule the object for
63 		 * deconstruction. Provide acquire ordering.
64 		 */
65 		smp_acquire__after_ctrl_dep();
66 		return true;
67 	}
68 
69 	/*
70 	 * If the reference count was already in the dead zone, then this
71 	 * put() operation is imbalanced. Warn, put the reference count back to
72 	 * DEAD and tell the caller to not deconstruct the object.
73 	 */
74 	if (WARN_ONCE(cnt >= FILE_REF_RELEASED, "imbalanced put on file reference count")) {
75 		atomic_long_set(&ref->refcnt, FILE_REF_DEAD);
76 		return false;
77 	}
78 
79 	/*
80 	 * This is a put() operation on a saturated refcount. Restore the
81 	 * mean saturation value and tell the caller to not deconstruct the
82 	 * object.
83 	 */
84 	if (cnt > FILE_REF_MAXREF)
85 		atomic_long_set(&ref->refcnt, FILE_REF_SATURATED);
86 	return false;
87 }
88 EXPORT_SYMBOL_GPL(__file_ref_put);
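/*
 * A minimal usage sketch (not part of this file) of how an object protected
 * by a file_ref_t would normally reach the slowpath above: callers go through
 * the inlined file_ref_get()/file_ref_put() fast paths from <linux/file_ref.h>
 * and only fall into __file_ref_put() when the count leaves the valid zone.
 * "struct foo" and foo_free() are hypothetical names used for illustration.
 *
 *	struct foo { file_ref_t ref; ... };
 *
 *	void foo_put(struct foo *f)
 *	{
 *		if (file_ref_put(&f->ref))	// true: last reference is gone
 *			foo_free(f);		// safe to destroy the object
 *	}
 */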
89 
90 unsigned int sysctl_nr_open __read_mostly = 1024*1024;
91 unsigned int sysctl_nr_open_min = BITS_PER_LONG;
92 /* our min() is unusable in constant expressions ;-/ */
93 #define __const_min(x, y) ((x) < (y) ? (x) : (y))
94 unsigned int sysctl_nr_open_max =
95 	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
96 
97 static void __free_fdtable(struct fdtable *fdt)
98 {
99 	kvfree(fdt->fd);
100 	kvfree(fdt->open_fds);
101 	kfree(fdt);
102 }
103 
104 static void free_fdtable_rcu(struct rcu_head *rcu)
105 {
106 	__free_fdtable(container_of(rcu, struct fdtable, rcu));
107 }
108 
109 #define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
110 #define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))
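/*
 * Worked example (assuming a 64-bit kernel, BITS_PER_LONG == 64): for an
 * fdtable with nr == 1024 slots, ->open_fds needs BITS_TO_LONGS(1024) == 16
 * words, so the second-level ->full_fds_bits bitmap needs
 * BITBIT_NR(1024) == BITS_TO_LONGS(16) == 1 word, i.e. BITBIT_SIZE(1024) == 8
 * bytes: one summary bit per open_fds word.
 */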
111 
112 #define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds
113 /*
114  * Copy 'count' fd bits from the old table to the new table and clear the extra
115  * space if any.  This does not copy the file pointers.  Called with the files
116  * spinlock held for write.
117  */
118 static inline void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
119 			    unsigned int copy_words)
120 {
121 	unsigned int nwords = fdt_words(nfdt);
122 
123 	bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds,
124 			copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
125 	bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec,
126 			copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
127 	bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits,
128 			copy_words, nwords);
129 }
130 
131 /*
132  * Copy all file descriptors from the old table to the new, expanded table and
133  * clear the extra space.  Called with the files spinlock held for write.
134  */
135 static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
136 {
137 	size_t cpy, set;
138 
139 	BUG_ON(nfdt->max_fds < ofdt->max_fds);
140 
141 	cpy = ofdt->max_fds * sizeof(struct file *);
142 	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
143 	memcpy(nfdt->fd, ofdt->fd, cpy);
144 	memset((char *)nfdt->fd + cpy, 0, set);
145 
146 	copy_fd_bitmaps(nfdt, ofdt, fdt_words(ofdt));
147 }
148 
149 /*
150  * Note how the fdtable bitmap allocations very much have to be a multiple of
151  * BITS_PER_LONG. This is not only because we walk those things in chunks of
152  * 'unsigned long' in some places, but simply because that is how the Linux
153  * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
154  * they are very much "bits in an array of unsigned long".
155  */
156 static struct fdtable *alloc_fdtable(unsigned int slots_wanted)
157 {
158 	struct fdtable *fdt;
159 	unsigned int nr;
160 	void *data;
161 
162 	/*
163 	 * Figure out how many fds we actually want to support in this fdtable.
164 	 * Allocation steps are keyed to the size of the fdarray, since it
165 	 * grows far faster than any of the other dynamic data. We try to fit
166 	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
167 	 * and growing in powers of two from there on.  Since we are called only
168 	 * with slots_wanted > BITS_PER_LONG (the embedded instance in files->fdtab
169 	 * already gives BITS_PER_LONG slots), the above boils down to
170 	 * 1.  use the smallest power of two large enough to give us that many
171 	 * slots.
172 	 * 2.  on 32bit skip 64 and 128 - the minimal capacity we want there is
173 	 * 256 slots (i.e. 1Kb fd array).
174 	 * 3.  on 64bit don't skip anything, 1Kb fd array means 128 slots there
175 	 * and we are never going to be asked for 64 or less.
176 	 */
177 	if (IS_ENABLED(CONFIG_32BIT) && slots_wanted < 256)
178 		nr = 256;
179 	else
180 		nr = roundup_pow_of_two(slots_wanted);
181 	/*
182 	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
183 	 * had been set lower between the check in expand_files() and here.
184 	 *
185 	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
186 	 * bitmaps handling below becomes unpleasant, to put it mildly...
187 	 */
188 	if (unlikely(nr > sysctl_nr_open)) {
189 		nr = round_down(sysctl_nr_open, BITS_PER_LONG);
190 		if (nr < slots_wanted)
191 			return ERR_PTR(-EMFILE);
192 	}
193 
194 	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
195 	if (!fdt)
196 		goto out;
197 	fdt->max_fds = nr;
198 	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
199 	if (!data)
200 		goto out_fdt;
201 	fdt->fd = data;
202 
203 	data = kvmalloc(max_t(size_t,
204 				 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
205 				 GFP_KERNEL_ACCOUNT);
206 	if (!data)
207 		goto out_arr;
208 	fdt->open_fds = data;
209 	data += nr / BITS_PER_BYTE;
210 	fdt->close_on_exec = data;
211 	data += nr / BITS_PER_BYTE;
212 	fdt->full_fds_bits = data;
213 
214 	return fdt;
215 
216 out_arr:
217 	kvfree(fdt->fd);
218 out_fdt:
219 	kfree(fdt);
220 out:
221 	return ERR_PTR(-ENOMEM);
222 }
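/*
 * Worked sizing example for alloc_fdtable() above, assuming a 64-bit kernel:
 * slots_wanted == 300 rounds up to nr == 512, giving a
 * 512 * sizeof(struct file *) == 4096-byte fd array (one page), plus a single
 * kvmalloc() holding open_fds (64 bytes), close_on_exec (64 bytes) and
 * full_fds_bits (BITBIT_SIZE(512) == 8 bytes).
 */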
223 
224 /*
225  * Expand the file descriptor table.
226  * This function will allocate a new fdtable and both fd array and fdset, of
227  * the given size.
228  * Return <0 error code on error; 0 on successful completion.
229  * The files->file_lock should be held on entry, and will be held on exit.
230  */
231 static int expand_fdtable(struct files_struct *files, unsigned int nr)
232 	__releases(files->file_lock)
233 	__acquires(files->file_lock)
234 {
235 	struct fdtable *new_fdt, *cur_fdt;
236 
237 	spin_unlock(&files->file_lock);
238 	new_fdt = alloc_fdtable(nr + 1);
239 
240 	/* make sure all fd_install() have seen resize_in_progress
241 	 * or have finished their rcu_read_lock_sched() section.
242 	 */
243 	if (atomic_read(&files->count) > 1)
244 		synchronize_rcu();
245 
246 	spin_lock(&files->file_lock);
247 	if (IS_ERR(new_fdt))
248 		return PTR_ERR(new_fdt);
249 	cur_fdt = files_fdtable(files);
250 	BUG_ON(nr < cur_fdt->max_fds);
251 	copy_fdtable(new_fdt, cur_fdt);
252 	rcu_assign_pointer(files->fdt, new_fdt);
253 	if (cur_fdt != &files->fdtab)
254 		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
255 	/* coupled with smp_rmb() in fd_install() */
256 	smp_wmb();
257 	return 0;
258 }
259 
260 /*
261  * Expand files.
262  * This function will expand the file structures, if the requested size exceeds
263  * the current capacity and there is room for expansion.
264  * Return <0 error code on error; 0 on success.
265  * The files->file_lock should be held on entry, and will be held on exit.
266  */
267 static int expand_files(struct files_struct *files, unsigned int nr)
268 	__releases(files->file_lock)
269 	__acquires(files->file_lock)
270 {
271 	struct fdtable *fdt;
272 	int error;
273 
274 repeat:
275 	fdt = files_fdtable(files);
276 
277 	/* Do we need to expand? */
278 	if (nr < fdt->max_fds)
279 		return 0;
280 
281 	/* Can we expand? */
282 	if (nr >= sysctl_nr_open)
283 		return -EMFILE;
284 
285 	if (unlikely(files->resize_in_progress)) {
286 		spin_unlock(&files->file_lock);
287 		wait_event(files->resize_wait, !files->resize_in_progress);
288 		spin_lock(&files->file_lock);
289 		goto repeat;
290 	}
291 
292 	/* All good, so we try */
293 	files->resize_in_progress = true;
294 	error = expand_fdtable(files, nr);
295 	files->resize_in_progress = false;
296 
297 	wake_up_all(&files->resize_wait);
298 	return error;
299 }
300 
301 static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt,
302 				       bool set)
303 {
304 	if (set) {
305 		__set_bit(fd, fdt->close_on_exec);
306 	} else {
307 		if (test_bit(fd, fdt->close_on_exec))
308 			__clear_bit(fd, fdt->close_on_exec);
309 	}
310 }
311 
312 static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt, bool set)
313 {
314 	__set_bit(fd, fdt->open_fds);
315 	__set_close_on_exec(fd, fdt, set);
316 	fd /= BITS_PER_LONG;
317 	if (!~fdt->open_fds[fd])
318 		__set_bit(fd, fdt->full_fds_bits);
319 }
320 
321 static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
322 {
323 	__clear_bit(fd, fdt->open_fds);
324 	fd /= BITS_PER_LONG;
325 	if (test_bit(fd, fdt->full_fds_bits))
326 		__clear_bit(fd, fdt->full_fds_bits);
327 }
328 
329 static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt)
330 {
331 	return test_bit(fd, fdt->open_fds);
332 }
333 
334 /*
335  * Note that a sane fdtable size always has to be a multiple of
336  * BITS_PER_LONG, since we have bitmaps that are sized by this.
337  *
338  * punch_hole is optional - when close_range() is asked to unshare
339  * and close, we don't need to copy descriptors in that range, so
340  * a smaller cloned descriptor table might suffice if the last
341  * currently opened descriptor falls into that range.
342  */
343 static unsigned int sane_fdtable_size(struct fdtable *fdt, struct fd_range *punch_hole)
344 {
345 	unsigned int last = find_last_bit(fdt->open_fds, fdt->max_fds);
346 
347 	if (last == fdt->max_fds)
348 		return NR_OPEN_DEFAULT;
349 	if (punch_hole && punch_hole->to >= last && punch_hole->from <= last) {
350 		last = find_last_bit(fdt->open_fds, punch_hole->from);
351 		if (last == punch_hole->from)
352 			return NR_OPEN_DEFAULT;
353 	}
354 	return ALIGN(last + 1, BITS_PER_LONG);
355 }
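/*
 * Example for sane_fdtable_size() above: if the highest open descriptor is
 * fd 70, the function returns ALIGN(71, 64) == 128 on a 64-bit kernel; if no
 * descriptor is open at all (find_last_bit() returns max_fds), it falls back
 * to NR_OPEN_DEFAULT.
 */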
356 
357 /*
358  * Allocate a new descriptor table and copy contents from the passed in
359  * instance.  Returns a pointer to cloned table on success, ERR_PTR()
360  * on failure.  For 'punch_hole' see sane_fdtable_size().
361  */
362 struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_hole)
363 {
364 	struct files_struct *newf;
365 	struct file **old_fds, **new_fds;
366 	unsigned int open_files, i;
367 	struct fdtable *old_fdt, *new_fdt;
368 
369 	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
370 	if (!newf)
371 		return ERR_PTR(-ENOMEM);
372 
373 	atomic_set(&newf->count, 1);
374 
375 	spin_lock_init(&newf->file_lock);
376 	newf->resize_in_progress = false;
377 	init_waitqueue_head(&newf->resize_wait);
378 	newf->next_fd = 0;
379 	new_fdt = &newf->fdtab;
380 	new_fdt->max_fds = NR_OPEN_DEFAULT;
381 	new_fdt->close_on_exec = newf->close_on_exec_init;
382 	new_fdt->open_fds = newf->open_fds_init;
383 	new_fdt->full_fds_bits = newf->full_fds_bits_init;
384 	new_fdt->fd = &newf->fd_array[0];
385 
386 	spin_lock(&oldf->file_lock);
387 	old_fdt = files_fdtable(oldf);
388 	open_files = sane_fdtable_size(old_fdt, punch_hole);
389 
390 	/*
391 	 * Check whether we need to allocate a larger fd array and fd set.
392 	 */
393 	while (unlikely(open_files > new_fdt->max_fds)) {
394 		spin_unlock(&oldf->file_lock);
395 
396 		if (new_fdt != &newf->fdtab)
397 			__free_fdtable(new_fdt);
398 
399 		new_fdt = alloc_fdtable(open_files);
400 		if (IS_ERR(new_fdt)) {
401 			kmem_cache_free(files_cachep, newf);
402 			return ERR_CAST(new_fdt);
403 		}
404 
405 		/*
406 		 * Reacquire the oldf lock and a pointer to its fd table;
407 		 * it may have grown a new, bigger fd table in the meantime,
408 		 * so we need the latest pointer.
409 		 */
410 		spin_lock(&oldf->file_lock);
411 		old_fdt = files_fdtable(oldf);
412 		open_files = sane_fdtable_size(old_fdt, punch_hole);
413 	}
414 
415 	copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG);
416 
417 	old_fds = old_fdt->fd;
418 	new_fds = new_fdt->fd;
419 
420 	for (i = open_files; i != 0; i--) {
421 		struct file *f = *old_fds++;
422 		if (f) {
423 			get_file(f);
424 		} else {
425 			/*
426 			 * The fd may be claimed in the fd bitmap but not yet
427 			 * instantiated in the files array if a sibling thread
428 			 * is partway through open().  So make sure that this
429 			 * fd is available to the new process.
430 			 */
431 			__clear_open_fd(open_files - i, new_fdt);
432 		}
433 		rcu_assign_pointer(*new_fds++, f);
434 	}
435 	spin_unlock(&oldf->file_lock);
436 
437 	/* clear the remainder */
438 	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));
439 
440 	rcu_assign_pointer(newf->fdt, new_fdt);
441 
442 	return newf;
443 }
444 
445 static struct fdtable *close_files(struct files_struct * files)
446 {
447 	/*
448 	 * It is safe to dereference the fd table without RCU or
449 	 * ->file_lock because this is the last reference to the
450 	 * files structure.
451 	 */
452 	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
453 	unsigned int i, j = 0;
454 
455 	for (;;) {
456 		unsigned long set;
457 		i = j * BITS_PER_LONG;
458 		if (i >= fdt->max_fds)
459 			break;
460 		set = fdt->open_fds[j++];
461 		while (set) {
462 			if (set & 1) {
463 				struct file *file = fdt->fd[i];
464 				if (file) {
465 					filp_close(file, files);
466 					cond_resched();
467 				}
468 			}
469 			i++;
470 			set >>= 1;
471 		}
472 	}
473 
474 	return fdt;
475 }
476 
477 void put_files_struct(struct files_struct *files)
478 {
479 	if (atomic_dec_and_test(&files->count)) {
480 		struct fdtable *fdt = close_files(files);
481 
482 		/* free the arrays if they are not embedded */
483 		if (fdt != &files->fdtab)
484 			__free_fdtable(fdt);
485 		kmem_cache_free(files_cachep, files);
486 	}
487 }
488 
489 void exit_files(struct task_struct *tsk)
490 {
491 	struct files_struct * files = tsk->files;
492 
493 	if (files) {
494 		task_lock(tsk);
495 		tsk->files = NULL;
496 		task_unlock(tsk);
497 		put_files_struct(files);
498 	}
499 }
500 
501 struct files_struct init_files = {
502 	.count		= ATOMIC_INIT(1),
503 	.fdt		= &init_files.fdtab,
504 	.fdtab		= {
505 		.max_fds	= NR_OPEN_DEFAULT,
506 		.fd		= &init_files.fd_array[0],
507 		.close_on_exec	= init_files.close_on_exec_init,
508 		.open_fds	= init_files.open_fds_init,
509 		.full_fds_bits	= init_files.full_fds_bits_init,
510 	},
511 	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
512 	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
513 };
514 
515 static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
516 {
517 	unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */
518 	unsigned int maxbit = maxfd / BITS_PER_LONG;
519 	unsigned int bitbit = start / BITS_PER_LONG;
520 	unsigned int bit;
521 
522 	/*
523 	 * Try to avoid looking at the second level bitmap
524 	 */
525 	bit = find_next_zero_bit(&fdt->open_fds[bitbit], BITS_PER_LONG,
526 				 start & (BITS_PER_LONG - 1));
527 	if (bit < BITS_PER_LONG)
528 		return bit + bitbit * BITS_PER_LONG;
529 
530 	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
531 	if (bitbit >= maxfd)
532 		return maxfd;
533 	if (bitbit > start)
534 		start = bitbit;
535 	return find_next_zero_bit(fdt->open_fds, maxfd, start);
536 }
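/*
 * Example for find_next_fd() above (BITS_PER_LONG == 64): descriptors 0..63
 * live in open_fds[0]; once all 64 are in use, bit 0 of full_fds_bits is set,
 * so a search starting at fd 0 skips that word entirely instead of scanning
 * 64 set bits, and continues from the first word that still has a free slot.
 */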
537 
538 /*
539  * allocate a file descriptor, mark it busy.
540  */
541 static int alloc_fd(unsigned start, unsigned end, unsigned flags)
542 {
543 	struct files_struct *files = current->files;
544 	unsigned int fd;
545 	int error;
546 	struct fdtable *fdt;
547 
548 	spin_lock(&files->file_lock);
549 repeat:
550 	fdt = files_fdtable(files);
551 	fd = start;
552 	if (fd < files->next_fd)
553 		fd = files->next_fd;
554 
555 	if (likely(fd < fdt->max_fds))
556 		fd = find_next_fd(fdt, fd);
557 
558 	/*
559 	 * N.B. For clone tasks sharing a files structure, this test
560 	 * will limit the total number of files that can be opened.
561 	 */
562 	error = -EMFILE;
563 	if (unlikely(fd >= end))
564 		goto out;
565 
566 	if (unlikely(fd >= fdt->max_fds)) {
567 		error = expand_files(files, fd);
568 		if (error < 0)
569 			goto out;
570 
571 		goto repeat;
572 	}
573 
574 	if (start <= files->next_fd)
575 		files->next_fd = fd + 1;
576 
577 	__set_open_fd(fd, fdt, flags & O_CLOEXEC);
578 	error = fd;
579 
580 out:
581 	spin_unlock(&files->file_lock);
582 	return error;
583 }
584 
585 int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
586 {
587 	return alloc_fd(0, nofile, flags);
588 }
589 
590 int get_unused_fd_flags(unsigned flags)
591 {
592 	return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
593 }
594 EXPORT_SYMBOL(get_unused_fd_flags);
595 
596 static void __put_unused_fd(struct files_struct *files, unsigned int fd)
597 {
598 	struct fdtable *fdt = files_fdtable(files);
599 	__clear_open_fd(fd, fdt);
600 	if (fd < files->next_fd)
601 		files->next_fd = fd;
602 }
603 
604 void put_unused_fd(unsigned int fd)
605 {
606 	struct files_struct *files = current->files;
607 	spin_lock(&files->file_lock);
608 	__put_unused_fd(files, fd);
609 	spin_unlock(&files->file_lock);
610 }
611 
612 EXPORT_SYMBOL(put_unused_fd);
613 
614 /*
615  * Install a file pointer in the fd array.
616  *
617  * The VFS is full of places where we drop the files lock between
618  * setting the open_fds bitmap and installing the file in the file
619  * array.  At any such point, we are vulnerable to a dup2() race
620  * installing a file in the array before us.  We need to detect this and
621  * fput() the struct file we are about to overwrite in this case.
622  *
623  * It should never happen - if we allow dup2() to do it, _really_ bad things
624  * will follow.
625  *
626  * This consumes the "file" refcount, so callers should treat it
627  * as if they had called fput(file).
628  */
629 
630 void fd_install(unsigned int fd, struct file *file)
631 {
632 	struct files_struct *files = current->files;
633 	struct fdtable *fdt;
634 
635 	if (WARN_ON_ONCE(unlikely(file->f_mode & FMODE_BACKING)))
636 		return;
637 
638 	rcu_read_lock_sched();
639 
640 	if (unlikely(files->resize_in_progress)) {
641 		rcu_read_unlock_sched();
642 		spin_lock(&files->file_lock);
643 		fdt = files_fdtable(files);
644 		WARN_ON(fdt->fd[fd] != NULL);
645 		rcu_assign_pointer(fdt->fd[fd], file);
646 		spin_unlock(&files->file_lock);
647 		return;
648 	}
649 	/* coupled with smp_wmb() in expand_fdtable() */
650 	smp_rmb();
651 	fdt = rcu_dereference_sched(files->fdt);
652 	BUG_ON(fdt->fd[fd] != NULL);
653 	rcu_assign_pointer(fdt->fd[fd], file);
654 	rcu_read_unlock_sched();
655 }
656 
657 EXPORT_SYMBOL(fd_install);
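/*
 * A minimal sketch of the usual caller-side pattern built on
 * get_unused_fd_flags(), fd_install() and put_unused_fd(). The use of
 * anon_inode_getfile(), "example_fops" and "priv" here is illustrative only,
 * just one common way to obtain a struct file, not something prescribed by
 * this file.
 *
 *	struct file *file;
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *
 *	if (fd < 0)
 *		return fd;
 *	file = anon_inode_getfile("[example]", &example_fops, priv, O_RDWR);
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);	// slot was reserved but never used
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);		// publish; consumes the file reference
 *	return fd;
 */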
658 
659 /**
660  * file_close_fd_locked - return file associated with fd
661  * @files: file struct to retrieve file from
662  * @fd: file descriptor to retrieve file for
663  *
664  * Doesn't take a separate reference count.
665  *
666  * Context: files_lock must be held.
667  *
668  * Returns: The file associated with @fd (NULL if @fd is not open)
669  */
670 struct file *file_close_fd_locked(struct files_struct *files, unsigned fd)
671 {
672 	struct fdtable *fdt = files_fdtable(files);
673 	struct file *file;
674 
675 	lockdep_assert_held(&files->file_lock);
676 
677 	if (fd >= fdt->max_fds)
678 		return NULL;
679 
680 	fd = array_index_nospec(fd, fdt->max_fds);
681 	file = fdt->fd[fd];
682 	if (file) {
683 		rcu_assign_pointer(fdt->fd[fd], NULL);
684 		__put_unused_fd(files, fd);
685 	}
686 	return file;
687 }
688 
689 int close_fd(unsigned fd)
690 {
691 	struct files_struct *files = current->files;
692 	struct file *file;
693 
694 	spin_lock(&files->file_lock);
695 	file = file_close_fd_locked(files, fd);
696 	spin_unlock(&files->file_lock);
697 	if (!file)
698 		return -EBADF;
699 
700 	return filp_close(file, files);
701 }
702 EXPORT_SYMBOL(close_fd);
703 
704 /**
705  * last_fd - return last valid index into fd table
706  * @fdt: File descriptor table.
707  *
708  * Context: Either rcu read lock or files_lock must be held.
709  *
710  * Returns: Last valid index into fdtable.
711  */
712 static inline unsigned last_fd(struct fdtable *fdt)
713 {
714 	return fdt->max_fds - 1;
715 }
716 
717 static inline void __range_cloexec(struct files_struct *cur_fds,
718 				   unsigned int fd, unsigned int max_fd)
719 {
720 	struct fdtable *fdt;
721 
722 	/* make sure we're using the correct maximum value */
723 	spin_lock(&cur_fds->file_lock);
724 	fdt = files_fdtable(cur_fds);
725 	max_fd = min(last_fd(fdt), max_fd);
726 	if (fd <= max_fd)
727 		bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
728 	spin_unlock(&cur_fds->file_lock);
729 }
730 
731 static inline void __range_close(struct files_struct *files, unsigned int fd,
732 				 unsigned int max_fd)
733 {
734 	struct file *file;
735 	unsigned n;
736 
737 	spin_lock(&files->file_lock);
738 	n = last_fd(files_fdtable(files));
739 	max_fd = min(max_fd, n);
740 
741 	for (; fd <= max_fd; fd++) {
742 		file = file_close_fd_locked(files, fd);
743 		if (file) {
744 			spin_unlock(&files->file_lock);
745 			filp_close(file, files);
746 			cond_resched();
747 			spin_lock(&files->file_lock);
748 		} else if (need_resched()) {
749 			spin_unlock(&files->file_lock);
750 			cond_resched();
751 			spin_lock(&files->file_lock);
752 		}
753 	}
754 	spin_unlock(&files->file_lock);
755 }
756 
757 /**
758  * sys_close_range() - Close all file descriptors in a given range.
759  *
760  * @fd:     starting file descriptor to close
761  * @max_fd: last file descriptor to close
762  * @flags:  CLOSE_RANGE flags.
763  *
764  * This closes a range of file descriptors. All file descriptors
765  * from @fd up to and including @max_fd are closed.
766  * Currently, errors from closing a given file descriptor are ignored.
767  */
768 SYSCALL_DEFINE3(close_range, unsigned int, fd, unsigned int, max_fd,
769 		unsigned int, flags)
770 {
771 	struct task_struct *me = current;
772 	struct files_struct *cur_fds = me->files, *fds = NULL;
773 
774 	if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
775 		return -EINVAL;
776 
777 	if (fd > max_fd)
778 		return -EINVAL;
779 
780 	if ((flags & CLOSE_RANGE_UNSHARE) && atomic_read(&cur_fds->count) > 1) {
781 		struct fd_range range = {fd, max_fd}, *punch_hole = &range;
782 
783 		/*
784 		 * If the caller requested all fds to be made cloexec we always
785 		 * copy all of the file descriptors since they still want to
786 		 * use them.
787 		 */
788 		if (flags & CLOSE_RANGE_CLOEXEC)
789 			punch_hole = NULL;
790 
791 		fds = dup_fd(cur_fds, punch_hole);
792 		if (IS_ERR(fds))
793 			return PTR_ERR(fds);
794 		/*
795 		 * We used to share our file descriptor table, and have now
796 		 * created a private one, make sure we're using it below.
797 		 */
798 		swap(cur_fds, fds);
799 	}
800 
801 	if (flags & CLOSE_RANGE_CLOEXEC)
802 		__range_cloexec(cur_fds, fd, max_fd);
803 	else
804 		__range_close(cur_fds, fd, max_fd);
805 
806 	if (fds) {
807 		/*
808 		 * We're done closing the files we were supposed to. Time to install
809 		 * the new file descriptor table and drop the old one.
810 		 */
811 		task_lock(me);
812 		me->files = cur_fds;
813 		task_unlock(me);
814 		put_files_struct(fds);
815 	}
816 
817 	return 0;
818 }
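/*
 * Illustrative userspace usage of close_range(2); the wrapper names follow
 * glibc, and exact availability depends on the libc and kernel version:
 *
 *	close_range(3, ~0U, CLOSE_RANGE_CLOEXEC); // mark everything above
 *						  // stderr close-on-exec
 *	close_range(100, 200, 0);		  // close fds 100..200 inclusive
 */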
819 
820 /**
821  * file_close_fd - return file associated with fd
822  * @fd: file descriptor to retrieve file for
823  *
824  * Doesn't take a separate reference count.
825  *
826  * Returns: The file associated with @fd (NULL if @fd is not open)
827  */
828 struct file *file_close_fd(unsigned int fd)
829 {
830 	struct files_struct *files = current->files;
831 	struct file *file;
832 
833 	spin_lock(&files->file_lock);
834 	file = file_close_fd_locked(files, fd);
835 	spin_unlock(&files->file_lock);
836 
837 	return file;
838 }
839 
840 void do_close_on_exec(struct files_struct *files)
841 {
842 	unsigned i;
843 	struct fdtable *fdt;
844 
845 	/* exec unshares first */
846 	spin_lock(&files->file_lock);
847 	for (i = 0; ; i++) {
848 		unsigned long set;
849 		unsigned fd = i * BITS_PER_LONG;
850 		fdt = files_fdtable(files);
851 		if (fd >= fdt->max_fds)
852 			break;
853 		set = fdt->close_on_exec[i];
854 		if (!set)
855 			continue;
856 		fdt->close_on_exec[i] = 0;
857 		for ( ; set ; fd++, set >>= 1) {
858 			struct file *file;
859 			if (!(set & 1))
860 				continue;
861 			file = fdt->fd[fd];
862 			if (!file)
863 				continue;
864 			rcu_assign_pointer(fdt->fd[fd], NULL);
865 			__put_unused_fd(files, fd);
866 			spin_unlock(&files->file_lock);
867 			filp_close(file, files);
868 			cond_resched();
869 			spin_lock(&files->file_lock);
870 		}
871 
872 	}
873 	spin_unlock(&files->file_lock);
874 }
875 
876 static struct file *__get_file_rcu(struct file __rcu **f)
877 {
878 	struct file __rcu *file;
879 	struct file __rcu *file_reloaded;
880 	struct file __rcu *file_reloaded_cmp;
881 
882 	file = rcu_dereference_raw(*f);
883 	if (!file)
884 		return NULL;
885 
886 	if (unlikely(!file_ref_get(&file->f_ref)))
887 		return ERR_PTR(-EAGAIN);
888 
889 	file_reloaded = rcu_dereference_raw(*f);
890 
891 	/*
892 	 * Ensure that all accesses have a dependency on the load from
893 	 * rcu_dereference_raw() above so we get correct ordering
894 	 * between reuse/allocation and the pointer check below.
895 	 */
896 	file_reloaded_cmp = file_reloaded;
897 	OPTIMIZER_HIDE_VAR(file_reloaded_cmp);
898 
899 	/*
900 	 * file_ref_get() above provided a full memory barrier when we
901 	 * acquired a reference.
902 	 *
903 	 * This is paired with the write barrier from assigning to the
904 	 * __rcu protected file pointer so that if that pointer still
905 	 * matches the current file, we know we have successfully
906 	 * acquired a reference to the right file.
907 	 *
908 	 * If the pointers don't match the file has been reallocated by
909 	 * SLAB_TYPESAFE_BY_RCU.
910 	 */
911 	if (file == file_reloaded_cmp)
912 		return file_reloaded;
913 
914 	fput(file);
915 	return ERR_PTR(-EAGAIN);
916 }
917 
918 /**
919  * get_file_rcu - try to get a reference to a file under rcu
920  * @f: the file to get a reference on
921  *
922  * This function tries to get a reference on @f carefully verifying that
923  * @f hasn't been reused.
924  *
925  * This function should rarely have to be used and only by users who
926  * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
927  *
928  * Return: Returns @f with the reference count increased or NULL.
929  */
930 struct file *get_file_rcu(struct file __rcu **f)
931 {
932 	for (;;) {
933 		struct file __rcu *file;
934 
935 		file = __get_file_rcu(f);
936 		if (!IS_ERR(file))
937 			return file;
938 	}
939 }
940 EXPORT_SYMBOL_GPL(get_file_rcu);
941 
942 /**
943  * get_file_active - try to get a reference to a file
944  * @f: the file to get a reference on
945  *
946  * In contrast to get_file_rcu(), the pointer itself isn't part of the
947  * reference counting.
948  *
949  * This function should rarely have to be used and only by users who
950  * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
951  *
952  * Return: Returns @f with the reference count increased or NULL.
953  */
954 struct file *get_file_active(struct file **f)
955 {
956 	struct file __rcu *file;
957 
958 	rcu_read_lock();
959 	file = __get_file_rcu(f);
960 	rcu_read_unlock();
961 	if (IS_ERR(file))
962 		file = NULL;
963 	return file;
964 }
965 EXPORT_SYMBOL_GPL(get_file_active);
966 
967 static inline struct file *__fget_files_rcu(struct files_struct *files,
968        unsigned int fd, fmode_t mask)
969 {
970 	for (;;) {
971 		struct file *file;
972 		struct fdtable *fdt = rcu_dereference_raw(files->fdt);
973 		struct file __rcu **fdentry;
974 		unsigned long nospec_mask;
975 
976 		/* Mask is a 0 for invalid fd's, ~0 for valid ones */
977 		nospec_mask = array_index_mask_nospec(fd, fdt->max_fds);
978 
979 		/*
980 		 * fdentry points to the 'fd' offset, or fdt->fd[0].
981 		 * Loading from fdt->fd[0] is always safe, because the
982 		 * array always exists.
983 		 */
984 		fdentry = fdt->fd + (fd & nospec_mask);
985 
986 		/* Do the load, then mask any invalid result */
987 		file = rcu_dereference_raw(*fdentry);
988 		file = (void *)(nospec_mask & (unsigned long)file);
989 		if (unlikely(!file))
990 			return NULL;
991 
992 		/*
993 		 * Ok, we have a file pointer that was valid at
994 		 * some point, but it might have become stale since.
995 		 *
996 		 * We need to confirm it by incrementing the refcount
997 		 * and then check the lookup again.
998 		 *
999 		 * file_ref_get() gives us a full memory barrier. We
1000 		 * only really need an 'acquire' one to protect the
1001 		 * loads below, but we don't have that.
1002 		 */
1003 		if (unlikely(!file_ref_get(&file->f_ref)))
1004 			continue;
1005 
1006 		/*
1007 		 * Such a race can take two forms:
1008 		 *
1009 		 *  (a) the file ref already went down to zero and the
1010 		 *      file hasn't been reused yet or the file count
1011 		 *      isn't zero but the file has already been reused.
1012 		 *
1013 		 *  (b) the file table entry has changed under us.
1014 		 *       Note that we don't need to re-check the 'fdt->fd'
1015 		 *       pointer having changed, because it always goes
1016 		 *       hand-in-hand with 'fdt'.
1017 		 *
1018 		 * If so, we need to put our ref and try again.
1019 		 */
1020 		if (unlikely(file != rcu_dereference_raw(*fdentry)) ||
1021 		    unlikely(rcu_dereference_raw(files->fdt) != fdt)) {
1022 			fput(file);
1023 			continue;
1024 		}
1025 
1026 		/*
1027 		 * This isn't the file we're looking for or we're not
1028 		 * allowed to get a reference to it.
1029 		 */
1030 		if (unlikely(file->f_mode & mask)) {
1031 			fput(file);
1032 			return NULL;
1033 		}
1034 
1035 		/*
1036 		 * Ok, we have a ref to the file, and checked that it
1037 		 * still exists.
1038 		 */
1039 		return file;
1040 	}
1041 }
1042 
1043 static struct file *__fget_files(struct files_struct *files, unsigned int fd,
1044 				 fmode_t mask)
1045 {
1046 	struct file *file;
1047 
1048 	rcu_read_lock();
1049 	file = __fget_files_rcu(files, fd, mask);
1050 	rcu_read_unlock();
1051 
1052 	return file;
1053 }
1054 
1055 static inline struct file *__fget(unsigned int fd, fmode_t mask)
1056 {
1057 	return __fget_files(current->files, fd, mask);
1058 }
1059 
1060 struct file *fget(unsigned int fd)
1061 {
1062 	return __fget(fd, FMODE_PATH);
1063 }
1064 EXPORT_SYMBOL(fget);
1065 
1066 struct file *fget_raw(unsigned int fd)
1067 {
1068 	return __fget(fd, 0);
1069 }
1070 EXPORT_SYMBOL(fget_raw);
1071 
1072 struct file *fget_task(struct task_struct *task, unsigned int fd)
1073 {
1074 	struct file *file = NULL;
1075 
1076 	task_lock(task);
1077 	if (task->files)
1078 		file = __fget_files(task->files, fd, 0);
1079 	task_unlock(task);
1080 
1081 	return file;
1082 }
1083 
1084 struct file *fget_task_next(struct task_struct *task, unsigned int *ret_fd)
1085 {
1086 	/* Must be called with rcu_read_lock held */
1087 	struct files_struct *files;
1088 	unsigned int fd = *ret_fd;
1089 	struct file *file = NULL;
1090 
1091 	task_lock(task);
1092 	files = task->files;
1093 	if (files) {
1094 		rcu_read_lock();
1095 		for (; fd < files_fdtable(files)->max_fds; fd++) {
1096 			file = __fget_files_rcu(files, fd, 0);
1097 			if (file)
1098 				break;
1099 		}
1100 		rcu_read_unlock();
1101 	}
1102 	task_unlock(task);
1103 	*ret_fd = fd;
1104 	return file;
1105 }
1106 EXPORT_SYMBOL(fget_task_next);
1107 
1108 /*
1109  * Lightweight file lookup - no refcnt increment if fd table isn't shared.
1110  *
1111  * You can use this instead of fget if you satisfy all of the following
1112  * conditions:
1113  * 1) You must call fput_light before exiting the syscall and returning control
1114  *    to userspace (i.e. you cannot remember the returned struct file * after
1115  *    returning to userspace).
1116  * 2) You must not call filp_close on the returned struct file * in between
1117  *    calls to fget_light and fput_light.
1118  * 3) You must not clone the current task in between the calls to fget_light
1119  *    and fput_light.
1120  *
1121  * The fput_needed flag returned by fget_light should be passed to the
1122  * corresponding fput_light.
1123  *
1124  * (As an exception to rule 2, you can call filp_close between fget_light and
1125  * fput_light provided that you capture a real refcount with get_file before
1126  * the call to filp_close, and ensure that this real refcount is fput *after*
1127  * the fput_light call.)
1128  *
1129  * See also the documentation in rust/kernel/file.rs.
1130  */
1131 static inline struct fd __fget_light(unsigned int fd, fmode_t mask)
1132 {
1133 	struct files_struct *files = current->files;
1134 	struct file *file;
1135 
1136 	/*
1137 	 * If another thread is concurrently calling close_fd() followed
1138 	 * by put_files_struct(), we must not observe the old table
1139 	 * entry combined with the new refcount - otherwise we could
1140 	 * return a file that is concurrently being freed.
1141 	 *
1142 	 * atomic_read_acquire() pairs with atomic_dec_and_test() in
1143 	 * put_files_struct().
1144 	 */
1145 	if (likely(atomic_read_acquire(&files->count) == 1)) {
1146 		file = files_lookup_fd_raw(files, fd);
1147 		if (!file || unlikely(file->f_mode & mask))
1148 			return EMPTY_FD;
1149 		return BORROWED_FD(file);
1150 	} else {
1151 		file = __fget_files(files, fd, mask);
1152 		if (!file)
1153 			return EMPTY_FD;
1154 		return CLONED_FD(file);
1155 	}
1156 }
1157 struct fd fdget(unsigned int fd)
1158 {
1159 	return __fget_light(fd, FMODE_PATH);
1160 }
1161 EXPORT_SYMBOL(fdget);
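/*
 * A typical syscall-side sketch using the struct fd helpers above, assuming
 * the fd_empty()/fd_file()/fdput() accessors from <linux/file.h>;
 * do_something() is a hypothetical helper:
 *
 *	struct fd f = fdget(fd);
 *	long ret;
 *
 *	if (fd_empty(f))
 *		return -EBADF;
 *	ret = do_something(fd_file(f));
 *	fdput(f);		// drops the reference only if one was taken
 *	return ret;
 */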
1162 
1163 struct fd fdget_raw(unsigned int fd)
1164 {
1165 	return __fget_light(fd, 0);
1166 }
1167 
1168 /*
1169  * Try to avoid f_pos locking. We only need it if the
1170  * file is marked for FMODE_ATOMIC_POS, and it can be
1171  * accessed multiple ways.
1172  *
1173  * Always do it for directories, because pidfd_getfd()
1174  * can make a file accessible even if it otherwise would
1175  * not be, and for directories this is a correctness
1176  * issue, not a "POSIX requirement".
1177  */
1178 static inline bool file_needs_f_pos_lock(struct file *file)
1179 {
1180 	return (file->f_mode & FMODE_ATOMIC_POS) &&
1181 		(file_count(file) > 1 || file->f_op->iterate_shared);
1182 }
1183 
1184 struct fd fdget_pos(unsigned int fd)
1185 {
1186 	struct fd f = fdget(fd);
1187 	struct file *file = fd_file(f);
1188 
1189 	if (file && file_needs_f_pos_lock(file)) {
1190 		f.word |= FDPUT_POS_UNLOCK;
1191 		mutex_lock(&file->f_pos_lock);
1192 	}
1193 	return f;
1194 }
1195 
1196 void __f_unlock_pos(struct file *f)
1197 {
1198 	mutex_unlock(&f->f_pos_lock);
1199 }
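/*
 * Sketch of how fdget_pos() pairs with fdput_pos() (from <linux/file.h>) in
 * read/write/lseek style paths; do_positioned_io() is a hypothetical helper:
 *
 *	struct fd f = fdget_pos(fd);
 *	long ret;
 *
 *	if (fd_empty(f))
 *		return -EBADF;
 *	ret = do_positioned_io(fd_file(f));
 *	fdput_pos(f);		// releases f_pos_lock if it was taken above
 *	return ret;
 */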
1200 
1201 /*
1202  * We only lock f_pos if we have threads or if the file might be
1203  * shared with another process. In both cases we'll have an elevated
1204  * file count (done either by fdget() or by fork()).
1205  */
1206 
1207 void set_close_on_exec(unsigned int fd, int flag)
1208 {
1209 	struct files_struct *files = current->files;
1210 	spin_lock(&files->file_lock);
1211 	__set_close_on_exec(fd, files_fdtable(files), flag);
1212 	spin_unlock(&files->file_lock);
1213 }
1214 
1215 bool get_close_on_exec(unsigned int fd)
1216 {
1217 	bool res;
1218 	rcu_read_lock();
1219 	res = close_on_exec(fd, current->files);
1220 	rcu_read_unlock();
1221 	return res;
1222 }
1223 
1224 static int do_dup2(struct files_struct *files,
1225 	struct file *file, unsigned fd, unsigned flags)
1226 __releases(&files->file_lock)
1227 {
1228 	struct file *tofree;
1229 	struct fdtable *fdt;
1230 
1231 	/*
1232 	 * We need to detect attempts to do dup2() over an allocated but still
1233 	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
1234 	 * extra work in their equivalent of fget() - they insert struct
1235 	 * file immediately after grabbing descriptor, mark it larval if
1236 	 * more work (e.g. actual opening) is needed and make sure that
1237 	 * fget() treats larval files as absent.  Potentially interesting,
1238 	 * but while extra work in fget() is trivial, locking implications
1239 	 * and amount of surgery on open()-related paths in VFS are not.
1240 	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
1241 	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
1242 	 * scope of POSIX or SUS, since neither considers shared descriptor
1243 	 * tables and this condition does not arise without those.
1244 	 */
1245 	fdt = files_fdtable(files);
1246 	fd = array_index_nospec(fd, fdt->max_fds);
1247 	tofree = fdt->fd[fd];
1248 	if (!tofree && fd_is_open(fd, fdt))
1249 		goto Ebusy;
1250 	get_file(file);
1251 	rcu_assign_pointer(fdt->fd[fd], file);
1252 	__set_open_fd(fd, fdt, flags & O_CLOEXEC);
1253 	spin_unlock(&files->file_lock);
1254 
1255 	if (tofree)
1256 		filp_close(tofree, files);
1257 
1258 	return fd;
1259 
1260 Ebusy:
1261 	spin_unlock(&files->file_lock);
1262 	return -EBUSY;
1263 }
1264 
1265 int replace_fd(unsigned fd, struct file *file, unsigned flags)
1266 {
1267 	int err;
1268 	struct files_struct *files = current->files;
1269 
1270 	if (!file)
1271 		return close_fd(fd);
1272 
1273 	if (fd >= rlimit(RLIMIT_NOFILE))
1274 		return -EBADF;
1275 
1276 	spin_lock(&files->file_lock);
1277 	err = expand_files(files, fd);
1278 	if (unlikely(err < 0))
1279 		goto out_unlock;
1280 	return do_dup2(files, file, fd, flags);
1281 
1282 out_unlock:
1283 	spin_unlock(&files->file_lock);
1284 	return err;
1285 }
1286 
1287 /**
1288  * receive_fd() - Install received file into file descriptor table
1289  * @file: struct file that was received from another process
1290  * @ufd: __user pointer to write new fd number to
1291  * @o_flags: the O_* flags to apply to the new fd entry
1292  *
1293  * Installs a received file into the file descriptor table, with appropriate
1294  * checks and count updates. Optionally writes the fd number to userspace, if
1295  * @ufd is non-NULL.
1296  *
1297  * This helper handles its own reference counting of the incoming
1298  * struct file.
1299  *
1300  * Returns the newly installed fd or -ve on error.
1301  */
1302 int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
1303 {
1304 	int new_fd;
1305 	int error;
1306 
1307 	error = security_file_receive(file);
1308 	if (error)
1309 		return error;
1310 
1311 	new_fd = get_unused_fd_flags(o_flags);
1312 	if (new_fd < 0)
1313 		return new_fd;
1314 
1315 	if (ufd) {
1316 		error = put_user(new_fd, ufd);
1317 		if (error) {
1318 			put_unused_fd(new_fd);
1319 			return error;
1320 		}
1321 	}
1322 
1323 	fd_install(new_fd, get_file(file));
1324 	__receive_sock(file);
1325 	return new_fd;
1326 }
1327 EXPORT_SYMBOL_GPL(receive_fd);
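/*
 * receive_fd() above is the helper used when a struct file crosses a process
 * boundary (e.g. SCM_RIGHTS, pidfd_getfd(), seccomp addfd). A minimal sketch
 * of a kernel-side caller, assuming "file" was already obtained from the
 * sending side:
 *
 *	int fd = receive_fd(file, NULL, O_CLOEXEC);
 *
 *	if (fd < 0)
 *		return fd;	// security check or fd allocation failed
 */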
1328 
1329 int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
1330 {
1331 	int error;
1332 
1333 	error = security_file_receive(file);
1334 	if (error)
1335 		return error;
1336 	error = replace_fd(new_fd, file, o_flags);
1337 	if (error)
1338 		return error;
1339 	__receive_sock(file);
1340 	return new_fd;
1341 }
1342 
1343 static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
1344 {
1345 	int err = -EBADF;
1346 	struct file *file;
1347 	struct files_struct *files = current->files;
1348 
1349 	if ((flags & ~O_CLOEXEC) != 0)
1350 		return -EINVAL;
1351 
1352 	if (unlikely(oldfd == newfd))
1353 		return -EINVAL;
1354 
1355 	if (newfd >= rlimit(RLIMIT_NOFILE))
1356 		return -EBADF;
1357 
1358 	spin_lock(&files->file_lock);
1359 	err = expand_files(files, newfd);
1360 	file = files_lookup_fd_locked(files, oldfd);
1361 	if (unlikely(!file))
1362 		goto Ebadf;
1363 	if (unlikely(err < 0)) {
1364 		if (err == -EMFILE)
1365 			goto Ebadf;
1366 		goto out_unlock;
1367 	}
1368 	return do_dup2(files, file, newfd, flags);
1369 
1370 Ebadf:
1371 	err = -EBADF;
1372 out_unlock:
1373 	spin_unlock(&files->file_lock);
1374 	return err;
1375 }
1376 
1377 SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
1378 {
1379 	return ksys_dup3(oldfd, newfd, flags);
1380 }
1381 
1382 SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
1383 {
1384 	if (unlikely(newfd == oldfd)) { /* corner case */
1385 		struct files_struct *files = current->files;
1386 		struct file *f;
1387 		int retval = oldfd;
1388 
1389 		rcu_read_lock();
1390 		f = __fget_files_rcu(files, oldfd, 0);
1391 		if (!f)
1392 			retval = -EBADF;
1393 		rcu_read_unlock();
1394 		if (f)
1395 			fput(f);
1396 		return retval;
1397 	}
1398 	return ksys_dup3(oldfd, newfd, 0);
1399 }
1400 
1401 SYSCALL_DEFINE1(dup, unsigned int, fildes)
1402 {
1403 	int ret = -EBADF;
1404 	struct file *file = fget_raw(fildes);
1405 
1406 	if (file) {
1407 		ret = get_unused_fd_flags(0);
1408 		if (ret >= 0)
1409 			fd_install(ret, file);
1410 		else
1411 			fput(file);
1412 	}
1413 	return ret;
1414 }
1415 
1416 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
1417 {
1418 	unsigned long nofile = rlimit(RLIMIT_NOFILE);
1419 	int err;
1420 	if (from >= nofile)
1421 		return -EINVAL;
1422 	err = alloc_fd(from, nofile, flags);
1423 	if (err >= 0) {
1424 		get_file(file);
1425 		fd_install(err, file);
1426 	}
1427 	return err;
1428 }
1429 
1430 int iterate_fd(struct files_struct *files, unsigned n,
1431 		int (*f)(const void *, struct file *, unsigned),
1432 		const void *p)
1433 {
1434 	struct fdtable *fdt;
1435 	int res = 0;
1436 	if (!files)
1437 		return 0;
1438 	spin_lock(&files->file_lock);
1439 	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
1440 		struct file *file;
1441 		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
1442 		if (!file)
1443 			continue;
1444 		res = f(p, file, n);
1445 		if (res)
1446 			break;
1447 	}
1448 	spin_unlock(&files->file_lock);
1449 	return res;
1450 }
1451 EXPORT_SYMBOL(iterate_fd);
1452