// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/task_work.h>
#include <linux/swap.h>
#include <linux/kmemleak.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
static struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __ro_after_init;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

/* Container for backing file with optional user path */
struct backing_file {
	struct file file;
	struct path user_path;
};

static inline struct backing_file *backing_file(struct file *f)
{
	return container_of(f, struct backing_file, file);
}

struct path *backing_file_user_path(struct file *f)
{
	return &backing_file(f)->user_path;
}
EXPORT_SYMBOL_GPL(backing_file_user_path);

static inline void file_free(struct file *f)
{
	security_file_free(f);
	if (likely(!(f->f_mode & FMODE_NOACCOUNT)))
		percpu_counter_dec(&nr_files);
	put_cred(f->f_cred);
	if (unlikely(f->f_mode & FMODE_BACKING)) {
		path_put(backing_file_user_path(f));
		kfree(backing_file(f));
	} else {
		kmem_cache_free(filp_cachep, f);
	}
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Handle nr_files sysctl
 */
static int proc_nr_files(const struct ctl_table *table, int write, void *buffer,
			 size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}

static struct ctl_table fs_stat_sysctls[] = {
	{
		.procname	= "file-nr",
		.data		= &files_stat,
		.maxlen		= sizeof(files_stat),
		.mode		= 0444,
		.proc_handler	= proc_nr_files,
	},
	{
		.procname	= "file-max",
		.data		= &files_stat.max_files,
		.maxlen		= sizeof(files_stat.max_files),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= SYSCTL_LONG_ZERO,
		.extra2		= SYSCTL_LONG_MAX,
	},
	{
		.procname	= "nr_open",
		.data		= &sysctl_nr_open,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &sysctl_nr_open_min,
		.extra2		= &sysctl_nr_open_max,
	},
};

static int __init init_fs_stat_sysctls(void)
{
	register_sysctl_init("fs", fs_stat_sysctls);
	if (IS_ENABLED(CONFIG_BINFMT_MISC)) {
		struct ctl_table_header *hdr;

		hdr = register_sysctl_mount_point("fs/binfmt_misc");
		kmemleak_not_leak(hdr);
	}
	return 0;
}
fs_initcall(init_fs_stat_sysctls);
#endif
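
/*
 * The "file-nr" entry above exposes files_stat as three numbers: allocated
 * file handles, allocated-but-unused handles (always 0 on current kernels),
 * and max_files. A minimal userspace sketch for reading it (illustrative
 * only, not part of this file):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long nr, unused, max;
 *		FILE *f = fopen("/proc/sys/fs/file-nr", "r");
 *
 *		if (f && fscanf(f, "%lu %lu %lu", &nr, &unused, &max) == 3)
 *			printf("open: %lu, limit: %lu\n", nr, max);
 *		if (f)
 *			fclose(f);
 *		return 0;
 *	}
 */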

static int init_file(struct file *f, int flags, const struct cred *cred)
{
	int error;

	f->f_cred = get_cred(cred);
	error = security_file_alloc(f);
	if (unlikely(error)) {
		put_cred(f->f_cred);
		return error;
	}

	spin_lock_init(&f->f_lock);
	/*
	 * Note that f_pos_lock is only used for files raising
	 * FMODE_ATOMIC_POS and for directories. Other files such as pipes
	 * don't need it and, since f_pos_lock is in a union, may reuse
	 * the space for other purposes. They are expected to initialize
	 * the respective member when opening the file.
	 */
	mutex_init(&f->f_pos_lock);
	f->f_flags = flags;
	f->f_mode = OPEN_FMODE(flags);
	/* f->f_version: 0 */

	/*
	 * We're SLAB_TYPESAFE_BY_RCU so initialize f_count last. While
	 * users of the fget-rcu pattern need to be able to handle spurious
	 * refcount bumps, we should reinitialize the reused file first.
	 */
	atomic_long_set(&f->f_count, 1);
	return 0;
}
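
/*
 * Illustrative sketch of the fget-rcu pattern the comment in init_file()
 * refers to (not part of this file; the real lookup lives in fs/file.c).
 * Because filp_cachep is SLAB_TYPESAFE_BY_RCU, a file found under
 * rcu_read_lock() may have been freed and reused, so lockless lookups take
 * a speculative reference and then re-check that the descriptor table
 * still points at the same file:
 *
 *	rcu_read_lock();
 *	file = rcu_dereference(fdt->fd[fd]);
 *	if (file && atomic_long_inc_not_zero(&file->f_count)) {
 *		if (rcu_dereference(fdt->fd[fd]) != file) {
 *			fput(file);	// raced with reuse, drop the bogus ref
 *			file = NULL;
 *		}
 *	} else {
 *		file = NULL;
 *	}
 *	rcu_read_unlock();
 */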

/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * file structures limit, run out of memory or the operation is not
 * permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will unbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *alloc_empty_file(int flags, const struct cred *cred)
{
	static long old_max;
	struct file *f;
	int error;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (unlikely(!f))
		return ERR_PTR(-ENOMEM);

	error = init_file(f, flags, cred);
	if (unlikely(error)) {
		kmem_cache_free(filp_cachep, f);
		return ERR_PTR(error);
	}

	percpu_counter_inc(&nr_files);

	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	return ERR_PTR(-ENFILE);
}
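
/*
 * Hedged sketch of the caller obligation described above (illustrative
 * only; the real open path lives in fs/open.c): if the file will be used
 * for writing, the caller pairs the allocation with write access to the
 * mount, roughly like this, and the matching mnt_drop_write() /
 * put_file_access() happens on the error path or at __fput() time:
 *
 *	file = alloc_empty_file(flags, current_cred());
 *	if (IS_ERR(file))
 *		return file;
 *	if (file->f_mode & FMODE_WRITE) {
 *		error = mnt_want_write(path->mnt);
 *		if (error) {
 *			fput(file);
 *			return ERR_PTR(error);
 *		}
 *	}
 */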

/*
 * Variant of alloc_empty_file() that doesn't check or modify nr_files.
 *
 * This is only for kernel internal use, and the allocated file must not be
 * installed into file tables or such.
 */
struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred)
{
	struct file *f;
	int error;

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (unlikely(!f))
		return ERR_PTR(-ENOMEM);

	error = init_file(f, flags, cred);
	if (unlikely(error)) {
		kmem_cache_free(filp_cachep, f);
		return ERR_PTR(error);
	}

	f->f_mode |= FMODE_NOACCOUNT;

	return f;
}

/*
 * Variant of alloc_empty_file() that allocates a backing_file container
 * and doesn't check or modify nr_files.
 *
 * This is only for kernel internal use, and the allocated file must not be
 * installed into file tables or such.
 */
struct file *alloc_empty_backing_file(int flags, const struct cred *cred)
{
	struct backing_file *ff;
	int error;

	ff = kzalloc(sizeof(struct backing_file), GFP_KERNEL);
	if (unlikely(!ff))
		return ERR_PTR(-ENOMEM);

	error = init_file(&ff->file, flags, cred);
	if (unlikely(error)) {
		kfree(ff);
		return ERR_PTR(error);
	}

	ff->file.f_mode |= FMODE_BACKING | FMODE_NOACCOUNT;
	return &ff->file;
}

/**
 * file_init_path - initialize a 'struct file' based on path
 *
 * @file: the file to set up
 * @path: the (dentry, vfsmount) pair for the new file
 * @fop: the 'struct file_operations' for the new file
 */
static void file_init_path(struct file *file, const struct path *path,
			   const struct file_operations *fop)
{
	file->f_path = *path;
	file->f_inode = path->dentry->d_inode;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
	file->f_sb_err = file_sample_sb_err(file);
	if (fop->llseek)
		file->f_mode |= FMODE_LSEEK;
	if ((file->f_mode & FMODE_READ) &&
	     likely(fop->read || fop->read_iter))
		file->f_mode |= FMODE_CAN_READ;
	if ((file->f_mode & FMODE_WRITE) &&
	     likely(fop->write || fop->write_iter))
		file->f_mode |= FMODE_CAN_WRITE;
	file->f_iocb_flags = iocb_flags(file);
	file->f_mode |= FMODE_OPENED;
	file->f_op = fop;
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);
}
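
/*
 * Example of how the checks above translate a file_operations into f_mode
 * bits (hypothetical ops, purely illustrative): a driver that provides
 * only ->write_iter and ->llseek
 *
 *	static const struct file_operations ex_fops = {
 *		.owner		= THIS_MODULE,
 *		.write_iter	= ex_write_iter,	// hypothetical method
 *		.llseek		= noop_llseek,
 *	};
 *
 * opened with O_WRONLY ends up with FMODE_WRITE, FMODE_CAN_WRITE,
 * FMODE_LSEEK and FMODE_OPENED set but not FMODE_CAN_READ, so the VFS
 * rejects reads before the driver is ever called.
 */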

/**
 * alloc_file - allocate and initialize a 'struct file'
 *
 * @path: the (dentry, vfsmount) pair for the new file
 * @flags: O_... flags with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 */
static struct file *alloc_file(const struct path *path, int flags,
		const struct file_operations *fop)
{
	struct file *file;

	file = alloc_empty_file(flags, current_cred());
	if (!IS_ERR(file))
		file_init_path(file, path, fop);
	return file;
}

static inline int alloc_path_pseudo(const char *name, struct inode *inode,
				    struct vfsmount *mnt, struct path *path)
{
	struct qstr this = QSTR_INIT(name, strlen(name));

	path->dentry = d_alloc_pseudo(mnt->mnt_sb, &this);
	if (!path->dentry)
		return -ENOMEM;
	path->mnt = mntget(mnt);
	d_instantiate(path->dentry, inode);
	return 0;
}

struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
			       const char *name, int flags,
			       const struct file_operations *fops)
{
	int ret;
	struct path path;
	struct file *file;

	ret = alloc_path_pseudo(name, inode, mnt, &path);
	if (ret)
		return ERR_PTR(ret);

	file = alloc_file(&path, flags, fops);
	if (IS_ERR(file)) {
		ihold(inode);
		path_put(&path);
	}
	return file;
}
EXPORT_SYMBOL(alloc_file_pseudo);
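
/*
 * Hedged usage sketch for alloc_file_pseudo() (names are hypothetical,
 * purely for illustration): a subsystem with its own internal mount can
 * hand out files that never appear in any directory:
 *
 *	struct inode *inode = alloc_anon_inode(example_mnt->mnt_sb);
 *
 *	if (IS_ERR(inode))
 *		return ERR_CAST(inode);
 *	return alloc_file_pseudo(inode, example_mnt, "[example]",
 *				 O_RDWR, &example_fops);
 *
 * On failure alloc_file_pseudo() drops the dentry and mount references it
 * took but leaves the caller's inode reference intact (note the ihold()
 * before path_put() above), so the caller can still iput() or reuse the
 * inode on its error path.
 */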

struct file *alloc_file_pseudo_noaccount(struct inode *inode,
					 struct vfsmount *mnt, const char *name,
					 int flags,
					 const struct file_operations *fops)
{
	int ret;
	struct path path;
	struct file *file;

	ret = alloc_path_pseudo(name, inode, mnt, &path);
	if (ret)
		return ERR_PTR(ret);

	file = alloc_empty_file_noaccount(flags, current_cred());
	if (IS_ERR(file)) {
		ihold(inode);
		path_put(&path);
		return file;
	}
	file_init_path(file, &path, fops);
	return file;
}
EXPORT_SYMBOL_GPL(alloc_file_pseudo_noaccount);

struct file *alloc_file_clone(struct file *base, int flags,
				const struct file_operations *fops)
{
	struct file *f;

	f = alloc_file(&base->f_path, flags, fops);
	if (!IS_ERR(f)) {
		path_get(&f->f_path);
		f->f_mapping = base->f_mapping;
	}
	return f;
}

/* The real guts of fput() - releasing the last reference to a file. */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = file->f_inode;
	fmode_t mode = file->f_mode;

	if (unlikely(!(file->f_mode & FMODE_OPENED)))
		goto out;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_file(file);

	security_file_release(file);
	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	if (file->f_op->release)
		file->f_op->release(inode, file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
		     !(mode & FMODE_PATH))) {
		cdev_put(inode->i_cdev);
	}
	fops_put(file->f_op);
	file_f_owner_release(file);
	put_file_access(file);
	dput(dentry);
	if (unlikely(mode & FMODE_NEED_UNMOUNT))
		dissolve_on_fput(mnt);
	mntput(mnt);
out:
	file_free(file);
}

static LLIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_fput_list);
	struct file *f, *t;

	llist_for_each_entry_safe(f, t, node, f_llist)
		__fput(f);
}

static void ____fput(struct callback_head *work)
{
	__fput(container_of(work, struct file, f_task_work));
}

/*
 * If a kernel thread really needs to have the final fput() it has done
 * to complete, call this.  The only user right now is the boot - we
 * *do* need to make sure our writes to binaries on initramfs have
 * not left us with opened struct file waiting for __fput() - execve()
 * won't work without that.  Please, don't add more callers without
 * very good reasons; in particular, never call this with locks
 * held and never call it from a thread that might need to do
 * some work on any kind of umount.
 */
void flush_delayed_fput(void)
{
	delayed_fput(NULL);
}
EXPORT_SYMBOL_GPL(flush_delayed_fput);

static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);

void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;

		if (unlikely(!(file->f_mode & (FMODE_BACKING | FMODE_OPENED)))) {
			file_free(file);
			return;
		}
		if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
			init_task_work(&file->f_task_work, ____fput);
			if (!task_work_add(task, &file->f_task_work, TWA_RESUME))
				return;
			/*
			 * After this task has run exit_task_work(),
			 * task_work_add() will fail.  Fall through to delayed
			 * fput to avoid leaking *file.
			 */
		}

		if (llist_add(&file->f_llist, &delayed_fput_list))
			schedule_delayed_work(&delayed_fput_work, 1);
	}
}

/*
 * Synchronous analog of fput(); for kernel threads that might be needed
 * in some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks), that need to wait for completion of __fput() and
 * know that for this specific struct file it won't involve anything that
 * would need them.  Use only if you really need it - at the very least,
 * don't blindly convert fput() calls by kernel threads to this.
 */
void __fput_sync(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count))
		__fput(file);
}

EXPORT_SYMBOL(fput);
EXPORT_SYMBOL(__fput_sync);
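
/*
 * Typical reference pattern seen elsewhere in the kernel (illustrative
 * sketch only, not part of this file): take a reference with fget() or
 * get_file(), use the file, and drop it with fput(); the final fput()
 * defers __fput() to task_work or to the delayed workqueue as implemented
 * above.
 *
 *	struct file *file = fget(fd);
 *
 *	if (!file)
 *		return -EBADF;
 *	ret = vfs_fsync(file, 0);
 *	fput(file);
 *	return ret;
 */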

void __init files_init(void)
{
	struct kmem_cache_args args = {
		.use_freeptr_offset = true,
		.freeptr_offset = offsetof(struct file, f_freeptr),
	};

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), &args,
				SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);
	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}

/*
 * One file with associated inode and dcache is very roughly 1K. By default
 * do not use more than 10% of our memory for files.
 */
void __init files_maxfiles_init(void)
{
	unsigned long n;
	unsigned long nr_pages = totalram_pages();
	unsigned long memreserve = (nr_pages - nr_free_pages()) * 3/2;

	memreserve = min(memreserve, nr_pages - 1);
	n = ((nr_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;

	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
}
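
/*
 * Worked example of the sizing above (illustrative numbers only): on a
 * 4K-page machine with 16 GiB of RAM, nr_pages is roughly 4.2 million;
 * with about 1 GiB already in use at boot, memreserve is ~390K pages
 * (used pages * 3/2), leaving ~3.8 million pages. Then
 *
 *	n = 3.8M * (4096 / 1024) / 10 ~= 1.5 million files,
 *
 * comfortably above the NR_FILE floor of 8192.
 */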