// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/task_work.h>
#include <linux/swap.h>
#include <linux/kmemleak.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
static struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __ro_after_init;
static struct kmem_cache *bfilp_cachep __ro_after_init;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

/* Container for backing file with optional user path */
struct backing_file {
	struct file file;
	union {
		struct path user_path;
		freeptr_t bf_freeptr;
	};
};

static inline struct backing_file *backing_file(struct file *f)
{
	return container_of(f, struct backing_file, file);
}

struct path *backing_file_user_path(struct file *f)
{
	return &backing_file(f)->user_path;
}
EXPORT_SYMBOL_GPL(backing_file_user_path);
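/*
 * These helpers are only meaningful for files that were allocated through
 * alloc_empty_backing_file() and therefore carry FMODE_BACKING; a plain
 * file has no enclosing struct backing_file to recover.  file_free() below
 * checks the flag before taking this path, and other callers are expected
 * to do the same, e.g. (illustrative only, "real_path" is a made-up local
 * name):
 *
 *	if (unlikely(file->f_mode & FMODE_BACKING))
 *		real_path = backing_file_user_path(file);
 */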

static inline void file_free(struct file *f)
{
	security_file_free(f);
	if (likely(!(f->f_mode & FMODE_NOACCOUNT)))
		percpu_counter_dec(&nr_files);
	put_cred(f->f_cred);
	if (unlikely(f->f_mode & FMODE_BACKING)) {
		path_put(backing_file_user_path(f));
		kmem_cache_free(bfilp_cachep, backing_file(f));
	} else {
		kmem_cache_free(filp_cachep, f);
	}
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Handle nr_files sysctl
 */
static int proc_nr_files(const struct ctl_table *table, int write, void *buffer,
			 size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}

static struct ctl_table fs_stat_sysctls[] = {
	{
		.procname	= "file-nr",
		.data		= &files_stat,
		.maxlen		= sizeof(files_stat),
		.mode		= 0444,
		.proc_handler	= proc_nr_files,
	},
	{
		.procname	= "file-max",
		.data		= &files_stat.max_files,
		.maxlen		= sizeof(files_stat.max_files),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= SYSCTL_LONG_ZERO,
		.extra2		= SYSCTL_LONG_MAX,
	},
	{
		.procname	= "nr_open",
		.data		= &sysctl_nr_open,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &sysctl_nr_open_min,
		.extra2		= &sysctl_nr_open_max,
	},
};

static int __init init_fs_stat_sysctls(void)
{
	register_sysctl_init("fs", fs_stat_sysctls);
	if (IS_ENABLED(CONFIG_BINFMT_MISC)) {
		struct ctl_table_header *hdr;

		hdr = register_sysctl_mount_point("fs/binfmt_misc");
		kmemleak_not_leak(hdr);
	}
	return 0;
}
fs_initcall(init_fs_stat_sysctls);
#endif

static int init_file(struct file *f, int flags, const struct cred *cred)
{
	int error;

	f->f_cred = get_cred(cred);
	error = security_file_alloc(f);
	if (unlikely(error)) {
		put_cred(f->f_cred);
		return error;
	}

	spin_lock_init(&f->f_lock);
	/*
	 * Note that f_pos_lock is only used for files raising
	 * FMODE_ATOMIC_POS and for directories. Other files such as pipes
	 * don't need it and, since f_pos_lock is in a union, may reuse
	 * the space for other purposes. They are expected to initialize
	 * the respective member when opening the file.
	 */
	mutex_init(&f->f_pos_lock);
	memset(&f->f_path, 0, sizeof(f->f_path));
	memset(&f->f_ra, 0, sizeof(f->f_ra));

	f->f_flags = flags;
	f->f_mode = OPEN_FMODE(flags);

	f->f_op = NULL;
	f->f_mapping = NULL;
	f->private_data = NULL;
	f->f_inode = NULL;
	f->f_owner = NULL;
#ifdef CONFIG_EPOLL
	f->f_ep = NULL;
#endif

	f->f_iocb_flags = 0;
	f->f_pos = 0;
	f->f_wb_err = 0;
	f->f_sb_err = 0;

	/*
	 * We're SLAB_TYPESAFE_BY_RCU so initialize f_count last. While
	 * fget-rcu pattern users need to be able to handle spurious
	 * refcount bumps, we should reinitialize the reused file first.
	 */
	file_ref_init(&f->f_ref, 1);
	return 0;
}
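/*
 * The "fget-rcu pattern" referred to above, in a simplified, illustrative
 * form (the real lookup lives in fs/file.c; "fdentry" here stands for the
 * RCU-protected fd table slot):
 *
 *	rcu_read_lock();
 *	file = rcu_dereference(*fdentry);
 *	if (file && !file_ref_get(&file->f_ref))
 *		file = NULL;			// being freed, treat as gone
 *	else if (file && rcu_dereference(*fdentry) != file) {
 *		fput(file);			// slot was recycled, retry
 *		file = NULL;
 *	}
 *	rcu_read_unlock();
 *
 * Because the caches are SLAB_TYPESAFE_BY_RCU, the object may have been
 * freed and reused for a new file while such a lookup was in flight; the
 * re-check of the table slot after taking the reference is what makes the
 * spurious file_ref_get() on a recycled file harmless, provided
 * init_file() has fully reinitialized the object before resetting f_ref.
 */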

/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * file structures limit, run out of memory or the operation is not
 * permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will unbalance the mount's writer count
 * and get a warning at __fput() time (see the usage sketch
 * after this function).
 */
struct file *alloc_empty_file(int flags, const struct cred *cred)
{
	static long old_max;
	struct file *f;
	int error;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
	if (unlikely(!f))
		return ERR_PTR(-ENOMEM);

	error = init_file(f, flags, cred);
	if (unlikely(error)) {
		kmem_cache_free(filp_cachep, f);
		return ERR_PTR(error);
	}

	percpu_counter_inc(&nr_files);

	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	return ERR_PTR(-ENFILE);
}
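/*
 * Typical in-kernel users should not call alloc_empty_file() directly:
 * the higher-level open helpers both allocate and open the file and take
 * care of the writer accounting mentioned above.  Illustrative sketch
 * (assuming the caller already holds a struct path; names are placeholders):
 *
 *	struct file *file = dentry_open(&path, O_RDWR, current_cred());
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	...
 *	fput(file);
 *
 * Direct callers are essentially limited to the open path itself, which
 * pairs this allocation with vfs_open() and friends.
 */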

/*
 * Variant of alloc_empty_file() that doesn't check and modify nr_files.
 *
 * This is only for kernel internal use, and the allocated file must not be
 * installed into file tables or such.
 */
struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred)
{
	struct file *f;
	int error;

	f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
	if (unlikely(!f))
		return ERR_PTR(-ENOMEM);

	error = init_file(f, flags, cred);
	if (unlikely(error)) {
		kmem_cache_free(filp_cachep, f);
		return ERR_PTR(error);
	}

	f->f_mode |= FMODE_NOACCOUNT;

	return f;
}

/*
 * Variant of alloc_empty_file() that allocates a backing_file container
 * and doesn't check and modify nr_files.
 *
 * This is only for kernel internal use, and the allocated file must not be
 * installed into file tables or such.
 */
struct file *alloc_empty_backing_file(int flags, const struct cred *cred)
{
	struct backing_file *ff;
	int error;

	ff = kmem_cache_alloc(bfilp_cachep, GFP_KERNEL);
	if (unlikely(!ff))
		return ERR_PTR(-ENOMEM);

	error = init_file(&ff->file, flags, cred);
	if (unlikely(error)) {
		kmem_cache_free(bfilp_cachep, ff);
		return ERR_PTR(error);
	}

	ff->file.f_mode |= FMODE_BACKING | FMODE_NOACCOUNT;
	return &ff->file;
}

/**
 * file_init_path - initialize a 'struct file' based on path
 *
 * @file: the file to set up
 * @path: the (dentry, vfsmount) pair for the new file
 * @fop: the 'struct file_operations' for the new file
 */
static void file_init_path(struct file *file, const struct path *path,
			   const struct file_operations *fop)
{
	file->f_path = *path;
	file->f_inode = path->dentry->d_inode;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
	file->f_sb_err = file_sample_sb_err(file);
	if (fop->llseek)
		file->f_mode |= FMODE_LSEEK;
	if ((file->f_mode & FMODE_READ) &&
	    likely(fop->read || fop->read_iter))
		file->f_mode |= FMODE_CAN_READ;
	if ((file->f_mode & FMODE_WRITE) &&
	    likely(fop->write || fop->write_iter))
		file->f_mode |= FMODE_CAN_WRITE;
	file->f_iocb_flags = iocb_flags(file);
	file->f_mode |= FMODE_OPENED;
	file->f_op = fop;
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);
}

/**
 * alloc_file - allocate and initialize a 'struct file'
 *
 * @path: the (dentry, vfsmount) pair for the new file
 * @flags: O_... flags with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 */
static struct file *alloc_file(const struct path *path, int flags,
			       const struct file_operations *fop)
{
	struct file *file;

	file = alloc_empty_file(flags, current_cred());
	if (!IS_ERR(file))
		file_init_path(file, path, fop);
	return file;
}

static inline int alloc_path_pseudo(const char *name, struct inode *inode,
				    struct vfsmount *mnt, struct path *path)
{
	struct qstr this = QSTR_INIT(name, strlen(name));

	path->dentry = d_alloc_pseudo(mnt->mnt_sb, &this);
	if (!path->dentry)
		return -ENOMEM;
	path->mnt = mntget(mnt);
	d_instantiate(path->dentry, inode);
	return 0;
}

struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
			       const char *name, int flags,
			       const struct file_operations *fops)
{
	int ret;
	struct path path;
	struct file *file;

	ret = alloc_path_pseudo(name, inode, mnt, &path);
	if (ret)
		return ERR_PTR(ret);

	file = alloc_file(&path, flags, fops);
	if (IS_ERR(file)) {
		ihold(inode);
		path_put(&path);
	}
	return file;
}
EXPORT_SYMBOL(alloc_file_pseudo);
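/*
 * A typical user of alloc_file_pseudo() owns an internal vfsmount and an
 * inode it created itself, roughly like this (illustrative sketch only;
 * "my_mnt", "my_fops" and the "[my-pseudo]" name are placeholders, and
 * real callers include the shmem and hugetlbfs file setup helpers):
 *
 *	struct inode *inode = alloc_anon_inode(my_mnt->mnt_sb);
 *	struct file *file;
 *
 *	if (IS_ERR(inode))
 *		return ERR_CAST(inode);
 *	file = alloc_file_pseudo(inode, my_mnt, "[my-pseudo]", O_RDWR, &my_fops);
 *	if (IS_ERR(file))
 *		iput(inode);	// error path: caller still owns the inode ref
 *	return file;
 *
 * On success the inode reference is effectively owned by the file's dentry
 * (via d_instantiate() above), which is why alloc_file_pseudo() takes an
 * extra ihold() before dropping the path on failure.
 */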

struct file *alloc_file_pseudo_noaccount(struct inode *inode,
					 struct vfsmount *mnt, const char *name,
					 int flags,
					 const struct file_operations *fops)
{
	int ret;
	struct path path;
	struct file *file;

	ret = alloc_path_pseudo(name, inode, mnt, &path);
	if (ret)
		return ERR_PTR(ret);

	file = alloc_empty_file_noaccount(flags, current_cred());
	if (IS_ERR(file)) {
		ihold(inode);
		path_put(&path);
		return file;
	}
	file_init_path(file, &path, fops);
	return file;
}
EXPORT_SYMBOL_GPL(alloc_file_pseudo_noaccount);

struct file *alloc_file_clone(struct file *base, int flags,
			      const struct file_operations *fops)
{
	struct file *f;

	f = alloc_file(&base->f_path, flags, fops);
	if (!IS_ERR(f)) {
		path_get(&f->f_path);
		f->f_mapping = base->f_mapping;
	}
	return f;
}

/* the real guts of fput() - releasing the last reference to file */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = file->f_inode;
	fmode_t mode = file->f_mode;

	if (unlikely(!(file->f_mode & FMODE_OPENED)))
		goto out;

	might_sleep();

	fsnotify_close(file);
	/*
	 * eventpoll_release() should be called first in the file
	 * cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_file(file);

	security_file_release(file);
	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	if (file->f_op->release)
		file->f_op->release(inode, file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
		     !(mode & FMODE_PATH))) {
		cdev_put(inode->i_cdev);
	}
	fops_put(file->f_op);
	file_f_owner_release(file);
	put_file_access(file);
	dput(dentry);
	if (unlikely(mode & FMODE_NEED_UNMOUNT))
		dissolve_on_fput(mnt);
	mntput(mnt);
out:
	file_free(file);
}

static LLIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_fput_list);
	struct file *f, *t;

	llist_for_each_entry_safe(f, t, node, f_llist)
		__fput(f);
}

static void ____fput(struct callback_head *work)
{
	__fput(container_of(work, struct file, f_task_work));
}

/*
 * If a kernel thread really needs to have the final fput() it has done
 * to complete, call this.  The only user right now is the boot - we
 * *do* need to make sure our writes to binaries on initramfs have
 * not left us with opened struct files waiting for __fput() - execve()
 * won't work without that.  Please, don't add more callers without
 * very good reasons; in particular, never call this with locks
 * held and never call it from a thread that might need to do
 * some work on any kind of umount.
 */
void flush_delayed_fput(void)
{
	delayed_fput(NULL);
}
EXPORT_SYMBOL_GPL(flush_delayed_fput);

static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);

void fput(struct file *file)
{
	if (file_ref_put(&file->f_ref)) {
		struct task_struct *task = current;

		if (unlikely(!(file->f_mode & (FMODE_BACKING | FMODE_OPENED)))) {
			file_free(file);
			return;
		}
		if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
			init_task_work(&file->f_task_work, ____fput);
			if (!task_work_add(task, &file->f_task_work, TWA_RESUME))
				return;
			/*
			 * After this task has run exit_task_work(),
			 * task_work_add() will fail.  Fall through to delayed
			 * fput to avoid leaking *file.
			 */
		}

		if (llist_add(&file->f_llist, &delayed_fput_list))
			schedule_delayed_work(&delayed_fput_work, 1);
	}
}

/*
 * Synchronous analog of fput(); for kernel threads that might be needed
 * in some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks), that need to wait for completion of __fput(), and
 * that know for this specific struct file it won't involve anything that
 * would need them.  Use only if you really need it - at the very least,
 * don't blindly convert fput() by a kernel thread to this.
 */
void __fput_sync(struct file *file)
{
	if (file_ref_put(&file->f_ref))
		__fput(file);
}

EXPORT_SYMBOL(fput);
EXPORT_SYMBOL(__fput_sync);

void __init files_init(void)
{
	struct kmem_cache_args args = {
		.use_freeptr_offset = true,
		.freeptr_offset = offsetof(struct file, f_freeptr),
	};

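	/*
	 * Both caches below are SLAB_TYPESAFE_BY_RCU; the explicit
	 * freeptr_offset (pointing at the f_freeptr / bf_freeptr union slots)
	 * presumably lets the slab free pointer live inside the object, in a
	 * field that is only invalid after the last fput(), rather than in
	 * space that fget-rcu style lookups might still be reading from a
	 * just-freed file.
	 */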
	filp_cachep = kmem_cache_create("filp", sizeof(struct file), &args,
				SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);

	args.freeptr_offset = offsetof(struct backing_file, bf_freeptr);
	bfilp_cachep = kmem_cache_create("bfilp", sizeof(struct backing_file),
				&args, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);
	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}

/*
 * One file with associated inode and dcache is very roughly 1K.  By
 * default do not use more than 10% of our memory for files.
 */
void __init files_maxfiles_init(void)
{
	unsigned long n;
	unsigned long nr_pages = totalram_pages();
	unsigned long memreserve = (nr_pages - nr_free_pages()) * 3/2;

	memreserve = min(memreserve, nr_pages - 1);
	n = ((nr_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;

	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
}
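/*
 * Worked example of the sizing above (assuming 4 KiB pages, just to make
 * the arithmetic concrete): on a 16 GiB machine totalram_pages() is about
 * 4,194,304, so with memreserve ignored n = 4,194,304 * (4096 / 1024) / 10
 * ~= 1.6 million files, i.e. ~1.6 GiB at the assumed ~1 KiB per open file,
 * which is the intended 10% of RAM.  The early-boot memreserve term only
 * lowers that bound, and NR_FILE keeps the limit from collapsing on tiny
 * systems.
 */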