// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/task_work.h>
#include <linux/swap.h>
#include <linux/kmemleak.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
static struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __ro_after_init;
static struct kmem_cache *bfilp_cachep __ro_after_init;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

/* Container for backing file with optional user path */
struct backing_file {
	struct file file;
	union {
		struct path user_path;
		freeptr_t bf_freeptr;
	};
};

static inline struct backing_file *backing_file(struct file *f)
{
	return container_of(f, struct backing_file, file);
}

struct path *backing_file_user_path(struct file *f)
{
	return &backing_file(f)->user_path;
}
EXPORT_SYMBOL_GPL(backing_file_user_path);
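
/*
 * Illustrative sketch (not part of the original file): FMODE_BACKING,
 * set by alloc_empty_backing_file() below, is what makes the
 * container_of() above legitimate - it is only valid for files that
 * are actually embedded in a struct backing_file:
 *
 *	struct file *f = alloc_empty_backing_file(O_RDONLY, current_cred());
 *	struct backing_file *bf;
 *
 *	if (!IS_ERR(f)) {
 *		WARN_ON(!(f->f_mode & FMODE_BACKING));
 *		bf = backing_file(f);	// container_of() is safe here
 *	}
 */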

static inline void file_free(struct file *f)
{
	security_file_free(f);
	if (likely(!(f->f_mode & FMODE_NOACCOUNT)))
		percpu_counter_dec(&nr_files);
	put_cred(f->f_cred);
	if (unlikely(f->f_mode & FMODE_BACKING)) {
		path_put(backing_file_user_path(f));
		kmem_cache_free(bfilp_cachep, backing_file(f));
	} else {
		kmem_cache_free(filp_cachep, f);
	}
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Handle nr_files sysctl
 */
static int proc_nr_files(const struct ctl_table *table, int write, void *buffer,
			 size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}

static const struct ctl_table fs_stat_sysctls[] = {
	{
		.procname	= "file-nr",
		.data		= &files_stat,
		.maxlen		= sizeof(files_stat),
		.mode		= 0444,
		.proc_handler	= proc_nr_files,
	},
	{
		.procname	= "file-max",
		.data		= &files_stat.max_files,
		.maxlen		= sizeof(files_stat.max_files),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= SYSCTL_LONG_ZERO,
		.extra2		= SYSCTL_LONG_MAX,
	},
	{
		.procname	= "nr_open",
		.data		= &sysctl_nr_open,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1		= &sysctl_nr_open_min,
		.extra2		= &sysctl_nr_open_max,
	},
};

static int __init init_fs_stat_sysctls(void)
{
	register_sysctl_init("fs", fs_stat_sysctls);
	if (IS_ENABLED(CONFIG_BINFMT_MISC)) {
		struct ctl_table_header *hdr;

		hdr = register_sysctl_mount_point("fs/binfmt_misc");
		kmemleak_not_leak(hdr);
	}
	return 0;
}
fs_initcall(init_fs_stat_sysctls);
#endif
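
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * the same register_sysctl_init() pattern used above, reduced to a
 * single read-only knob. "demo-count" and demo_count are made-up names
 * for illustration only:
 *
 *	static unsigned long demo_count;
 *	static const struct ctl_table demo_sysctls[] = {
 *		{
 *			.procname	= "demo-count",
 *			.data		= &demo_count,
 *			.maxlen		= sizeof(demo_count),
 *			.mode		= 0444,
 *			.proc_handler	= proc_doulongvec_minmax,
 *		},
 *	};
 *	register_sysctl_init("fs", demo_sysctls);
 */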

static int init_file(struct file *f, int flags, const struct cred *cred)
{
	int error;

	f->f_cred = get_cred(cred);
	error = security_file_alloc(f);
	if (unlikely(error)) {
		put_cred(f->f_cred);
		return error;
	}

	spin_lock_init(&f->f_lock);
	/*
	 * Note that f_pos_lock is only used for files that set
	 * FMODE_ATOMIC_POS and for directories. Other files such as
	 * pipes don't need it, and since f_pos_lock is in a union they
	 * may reuse the space for other purposes. They are expected to
	 * initialize the respective union member when opening the file.
	 */
	mutex_init(&f->f_pos_lock);
	memset(&f->f_path, 0, sizeof(f->f_path));
	memset(&f->f_ra, 0, sizeof(f->f_ra));

	f->f_flags	= flags;
	f->f_mode	= OPEN_FMODE(flags);

	f->f_op		= NULL;
	f->f_mapping	= NULL;
	f->private_data = NULL;
	f->f_inode	= NULL;
	f->f_owner	= NULL;
#ifdef CONFIG_EPOLL
	f->f_ep		= NULL;
#endif

	f->f_iocb_flags = 0;
	f->f_pos	= 0;
	f->f_wb_err	= 0;
	f->f_sb_err	= 0;

	/*
	 * We're SLAB_TYPESAFE_BY_RCU, so initialize f_ref last. While
	 * users of the fget-rcu pattern must be able to handle spurious
	 * refcount bumps, we should fully reinitialize a reused file
	 * before making its refcount visible.
	 */
	file_ref_init(&f->f_ref, 1);
	/*
	 * Disable permission and pre-content events for all files by default.
	 * They may be enabled later by file_set_fsnotify_mode_from_watchers().
	 */
	file_set_fsnotify_mode(f, FMODE_NONOTIFY_PERM);
	return 0;
}
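
/*
 * Illustrative sketch of the lookup side that the ordering in
 * init_file() protects, simplified from the __fget_files_rcu() path in
 * fs/file.c (retry logic and checks elided):
 *
 *	rcu_read_lock();
 *	file = rcu_dereference_raw(*fdentry);
 *	if (file && unlikely(!file_ref_get(&file->f_ref)))
 *		file = NULL;	// slab may have recycled the object; retry
 *	rcu_read_unlock();
 */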

/*
 * Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over
 * the file structures limit, ran out of memory or the operation is
 * not permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *alloc_empty_file(int flags, const struct cred *cred)
{
	static long old_max;
	struct file *f;
	int error;

	/*
	 * Privileged users can go above max_files
	 */
	if (unlikely(get_nr_files() >= files_stat.max_files) &&
	    !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
	if (unlikely(!f))
		return ERR_PTR(-ENOMEM);

	error = init_file(f, flags, cred);
	if (unlikely(error)) {
		kmem_cache_free(filp_cachep, f);
		return ERR_PTR(error);
	}

	percpu_counter_inc(&nr_files);

	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	return ERR_PTR(-ENFILE);
}
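
/*
 * Sketch of how the open path honours the contract above, condensed
 * from do_dentry_open() in fs/open.c (error handling elided):
 *
 *	if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
 *		error = get_write_access(inode);
 *		...
 *		error = __mnt_want_write(f->f_path.mnt);
 *		...
 *		f->f_mode |= FMODE_WRITER;	// put_file_access() undoes this
 *	}
 */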

/*
 * Variant of alloc_empty_file() that doesn't check and modify nr_files.
 *
 * This is only for kernel internal use, and the allocated file must not be
 * installed into file tables or such.
 */
struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred)
{
	struct file *f;
	int error;

	f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
	if (unlikely(!f))
		return ERR_PTR(-ENOMEM);

	error = init_file(f, flags, cred);
	if (unlikely(error)) {
		kmem_cache_free(filp_cachep, f);
		return ERR_PTR(error);
	}

	f->f_mode |= FMODE_NOACCOUNT;

	return f;
}

/*
 * Variant of alloc_empty_file() that allocates a backing_file container
 * and doesn't check and modify nr_files.
 *
 * This is only for kernel internal use, and the allocated file must not be
 * installed into file tables or such.
 */
struct file *alloc_empty_backing_file(int flags, const struct cred *cred)
{
	struct backing_file *ff;
	int error;

	ff = kmem_cache_alloc(bfilp_cachep, GFP_KERNEL);
	if (unlikely(!ff))
		return ERR_PTR(-ENOMEM);

	error = init_file(&ff->file, flags, cred);
	if (unlikely(error)) {
		kmem_cache_free(bfilp_cachep, ff);
		return ERR_PTR(error);
	}

	ff->file.f_mode |= FMODE_BACKING | FMODE_NOACCOUNT;
	return &ff->file;
}

/**
 * file_init_path - initialize a 'struct file' based on path
 *
 * @file: the file to set up
 * @path: the (dentry, vfsmount) pair for the new file
 * @fop: the 'struct file_operations' for the new file
 */
static void file_init_path(struct file *file, const struct path *path,
			   const struct file_operations *fop)
{
	file->f_path = *path;
	file->f_inode = path->dentry->d_inode;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
	file->f_sb_err = file_sample_sb_err(file);
	if (fop->llseek)
		file->f_mode |= FMODE_LSEEK;
	if ((file->f_mode & FMODE_READ) &&
	     likely(fop->read || fop->read_iter))
		file->f_mode |= FMODE_CAN_READ;
	if ((file->f_mode & FMODE_WRITE) &&
	     likely(fop->write || fop->write_iter))
		file->f_mode |= FMODE_CAN_WRITE;
	file->f_iocb_flags = iocb_flags(file);
	file->f_mode |= FMODE_OPENED;
	file->f_op = fop;
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);
}
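
/*
 * Consequence of the FMODE_CAN_* setup above: a file whose fops define
 * neither ->read nor ->read_iter never gets FMODE_CAN_READ, so
 * vfs_read() rejects it early with -EINVAL. Sketch (demo_write_iter is
 * a made-up helper):
 *
 *	static const struct file_operations wronly_fops = {
 *		.write_iter	= demo_write_iter,
 *	};
 *	// read(2) on a file backed by wronly_fops fails with -EINVAL
 *	// even if the file was opened with FMODE_READ set.
 */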

/**
 * alloc_file - allocate and initialize a 'struct file'
 *
 * @path: the (dentry, vfsmount) pair for the new file
 * @flags: O_... flags with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 */
static struct file *alloc_file(const struct path *path, int flags,
		const struct file_operations *fop)
{
	struct file *file;

	file = alloc_empty_file(flags, current_cred());
	if (!IS_ERR(file))
		file_init_path(file, path, fop);
	return file;
}

static inline int alloc_path_pseudo(const char *name, struct inode *inode,
				    struct vfsmount *mnt, struct path *path)
{
	path->dentry = d_alloc_pseudo(mnt->mnt_sb, &QSTR(name));
	if (!path->dentry)
		return -ENOMEM;
	path->mnt = mntget(mnt);
	d_instantiate(path->dentry, inode);
	return 0;
}

struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
			       const char *name, int flags,
			       const struct file_operations *fops)
{
	int ret;
	struct path path;
	struct file *file;

	ret = alloc_path_pseudo(name, inode, mnt, &path);
	if (ret)
		return ERR_PTR(ret);

	file = alloc_file(&path, flags, fops);
	if (IS_ERR(file)) {
		ihold(inode);
		path_put(&path);
		return file;
	}
	/*
	 * Disable all fsnotify events for pseudo files by default.
	 * They may be enabled by caller with file_set_fsnotify_mode().
	 */
	file_set_fsnotify_mode(file, FMODE_NONOTIFY);
	return file;
}
EXPORT_SYMBOL(alloc_file_pseudo);
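
/*
 * Typical use, modelled on the pipe code (fs/pipe.c); the inode comes
 * from the caller's pseudo filesystem and error unwinding is elided:
 *
 *	file = alloc_file_pseudo(inode, pipe_mnt, "", O_WRONLY,
 *				 &pipefifo_fops);
 *	if (IS_ERR(file))
 *		goto err_inode;
 */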

struct file *alloc_file_pseudo_noaccount(struct inode *inode,
					 struct vfsmount *mnt, const char *name,
					 int flags,
					 const struct file_operations *fops)
{
	int ret;
	struct path path;
	struct file *file;

	ret = alloc_path_pseudo(name, inode, mnt, &path);
	if (ret)
		return ERR_PTR(ret);

	file = alloc_empty_file_noaccount(flags, current_cred());
	if (IS_ERR(file)) {
		ihold(inode);
		path_put(&path);
		return file;
	}
	file_init_path(file, &path, fops);
	/*
	 * Disable all fsnotify events for pseudo files by default.
	 * They may be enabled by caller with file_set_fsnotify_mode().
	 */
	file_set_fsnotify_mode(file, FMODE_NONOTIFY);
	return file;
}
EXPORT_SYMBOL_GPL(alloc_file_pseudo_noaccount);

struct file *alloc_file_clone(struct file *base, int flags,
				const struct file_operations *fops)
{
	struct file *f;

	f = alloc_file(&base->f_path, flags, fops);
	if (!IS_ERR(f)) {
		path_get(&f->f_path);
		f->f_mapping = base->f_mapping;
	}
	return f;
}

/*
 * The real guts of fput() - releasing the last reference to a file.
 */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = file->f_inode;
	fmode_t mode = file->f_mode;

	if (unlikely(!(file->f_mode & FMODE_OPENED)))
		goto out;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_file(file);

	security_file_release(file);
	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	if (file->f_op->release)
		file->f_op->release(inode, file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
		     !(mode & FMODE_PATH))) {
		cdev_put(inode->i_cdev);
	}
	fops_put(file->f_op);
	file_f_owner_release(file);
	put_file_access(file);
	dput(dentry);
	if (unlikely(mode & FMODE_NEED_UNMOUNT))
		dissolve_on_fput(mnt);
	mntput(mnt);
out:
	file_free(file);
}

static LLIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_fput_list);
	struct file *f, *t;

	llist_for_each_entry_safe(f, t, node, f_llist)
		__fput(f);
}

static void ____fput(struct callback_head *work)
{
	__fput(container_of(work, struct file, f_task_work));
}

static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);

/*
 * If a kernel thread really needs the final fput() it has done to
 * complete, call this.  The only user right now is the boot - we
 * *do* need to make sure our writes to binaries on initramfs have
 * not left us with an opened struct file waiting for __fput() -
 * execve() won't work without that.  Please, don't add more callers
 * without very good reasons; in particular, never call this with
 * locks held and never call this from a thread that might need to do
 * some work on any kind of umount.
 */
void flush_delayed_fput(void)
{
	delayed_fput(NULL);
	flush_delayed_work(&delayed_fput_work);
}
EXPORT_SYMBOL_GPL(flush_delayed_fput);

static void __fput_deferred(struct file *file)
{
	struct task_struct *task = current;

	if (unlikely(!(file->f_mode & (FMODE_BACKING | FMODE_OPENED)))) {
		file_free(file);
		return;
	}

	if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
		init_task_work(&file->f_task_work, ____fput);
		if (!task_work_add(task, &file->f_task_work, TWA_RESUME))
			return;
		/*
		 * After this task has run exit_task_work(),
		 * task_work_add() will fail.  Fall through to delayed
		 * fput to avoid leaking *file.
		 */
	}

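	/*
	 * llist_add() returns true only if the list was empty before,
	 * so the delayed work is scheduled once per batch of deferred
	 * files rather than once per file.
	 */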
	if (llist_add(&file->f_llist, &delayed_fput_list))
		schedule_delayed_work(&delayed_fput_work, 1);
}

void fput(struct file *file)
{
	if (unlikely(file_ref_put(&file->f_ref)))
		__fput_deferred(file);
}
EXPORT_SYMBOL(fput);
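
/*
 * Canonical pairing (sketch): every successful fget() is balanced by an
 * fput(). The final fput() defers __fput() to task_work, so the heavy
 * teardown runs when the task returns to userspace, not under the
 * caller's locks:
 *
 *	struct file *f = fget(fd);
 *
 *	if (!f)
 *		return -EBADF;
 *	// ... use f ...
 *	fput(f);
 */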

/*
 * Synchronous analog of fput(); for kernel threads that might be needed
 * in some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks), need to wait for the completion of __fput() and
 * know that for this specific struct file it won't involve anything that
 * would need them.  Use only if you really need it - at the very least,
 * don't blindly convert fput() by a kernel thread to this.
 */
void __fput_sync(struct file *file)
{
	if (file_ref_put(&file->f_ref))
		__fput(file);
}
EXPORT_SYMBOL(__fput_sync);

/*
 * Equivalent to __fput_sync(), but optimized for being called with the last
 * reference.
 *
 * See file_ref_put_close() for details.
 */
void fput_close_sync(struct file *file)
{
	if (likely(file_ref_put_close(&file->f_ref)))
		__fput(file);
}

/*
 * Equivalent to fput(), but optimized for being called with the last
 * reference.
 *
 * See file_ref_put_close() for details.
 */
void fput_close(struct file *file)
{
	if (file_ref_put_close(&file->f_ref))
		__fput_deferred(file);
}

void __init files_init(void)
{
	struct kmem_cache_args args = {
		.use_freeptr_offset = true,
		.freeptr_offset = offsetof(struct file, f_freeptr),
	};

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), &args,
				SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);

	args.freeptr_offset = offsetof(struct backing_file, bf_freeptr);
	bfilp_cachep = kmem_cache_create("bfilp", sizeof(struct backing_file),
				&args, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);
	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}
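
/*
 * Note on the cache setup above: with SLAB_TYPESAFE_BY_RCU, RCU readers
 * may still inspect an object after it has been freed, so the
 * allocator's free pointer must not overwrite fields they dereference.
 * The use_freeptr_offset/freeptr_offset pair parks it in a union member
 * (f_freeptr resp. bf_freeptr) that is only meaningful for free objects.
 */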

/*
 * One file with associated inode and dcache is very roughly 1K. By default,
 * do not use more than 10% of our memory for files.
 */
void __init files_maxfiles_init(void)
{
	unsigned long n;
	unsigned long nr_pages = totalram_pages();
	unsigned long memreserve = (nr_pages - nr_free_pages()) * 3/2;

	memreserve = min(memreserve, nr_pages - 1);
	n = ((nr_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;

	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
}
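
/*
 * Worked example of the sizing above, assuming 4 KiB pages and 16 GiB
 * of RAM with 1 GiB already used at boot: nr_pages = 4194304,
 * memreserve = 262144 * 3/2 = 393216, so
 * n = (4194304 - 393216) * 4 / 10 ~= 1.52 million files - i.e. roughly
 * 10% of free memory at ~1 KiB per file, and well above NR_FILE.
 */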