xref: /linux/fs/file_table.c (revision 0d456bad36d42d16022be045c8a53ddbb59ee478)
/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/task_work.h>
#include <linux/ima.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

DEFINE_STATIC_LGLOCK(files_lglock);

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static void file_free_rcu(struct rcu_head *head)
{
	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

	put_cred(f->f_cred);
	kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
	percpu_counter_dec(&nr_files);
	file_check_state(f);
	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}
#endif

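/*
 * Note (illustrative): proc_nr_files() backs the fs.file-nr sysctl, so
 * the current counts can be read from userspace, e.g.
 *
 *	$ cat /proc/sys/fs/file-nr
 *	1728	0	102400
 *
 * (allocated filps, the always-zero free count, and file-max; the
 * numbers shown here are made up.)
 */
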
/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file *f;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (f == NULL)
		goto fail;

	percpu_counter_inc(&nr_files);
	f->f_cred = get_cred(cred);
	if (security_file_alloc(f))
		goto fail_sec;

	INIT_LIST_HEAD(&f->f_u.fu_list);
	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	goto fail;

fail_sec:
	file_free(f);
fail:
	return NULL;
}

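/*
 * Usage sketch (illustrative, not taken from this file): a caller that
 * allocates a filp with get_empty_filp() but fails before the file is
 * fully set up should drop it with put_filp(), not fput().  With a
 * made-up helper my_setup():
 *
 *	struct file *f = get_empty_filp();
 *	if (!f)
 *		return -ENFILE;
 *	if (my_setup(f)) {
 *		put_filp(f);
 *		return -EINVAL;
 *	}
 */
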
/**
 * alloc_file - allocate and initialize a 'struct file'
 * @path: the (dentry, vfsmount) pair the new file will refer to
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file'.  It performs the additional setup
 * (path, mapping, mode, f_op and the mount/readcount
 * bookkeeping) that raw get_empty_filp() callers would
 * otherwise have to get right themselves.
 */
struct file *alloc_file(struct path *path, fmode_t mode,
		const struct file_operations *fop)
{
	struct file *file;

	file = get_empty_filp();
	if (!file)
		return NULL;

	file->f_path = *path;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	file->f_mode = mode;
	file->f_op = fop;

	/*
	 * These mounts don't really matter in practice
	 * for r/o bind mounts.  They aren't userspace-
	 * visible.  We do this for consistency, and so
	 * that we can do debugging checks at __fput()
	 */
	if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
		file_take_write(file);
		WARN_ON(mnt_clone_write(path->mnt));
	}
	if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);
	return file;
}
EXPORT_SYMBOL(alloc_file);

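/*
 * Calling pattern sketch (illustrative, loosely modelled on anon-inode
 * style users; my_dentry, my_mnt and my_fops are placeholders): the
 * caller assembles a struct path whose dentry and vfsmount references
 * are taken over by the new file and dropped again in __fput():
 *
 *	struct path path;
 *	struct file *file;
 *
 *	path.dentry = dget(my_dentry);
 *	path.mnt = mntget(my_mnt);
 *	file = alloc_file(&path, FMODE_READ, &my_fops);
 *	if (!file) {
 *		path_put(&path);
 *		return -ENFILE;
 *	}
 */
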
/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
static void drop_file_write_access(struct file *file)
{
	struct vfsmount *mnt = file->f_path.mnt;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	put_write_access(inode);

	if (special_file(inode->i_mode))
		return;
	if (file_check_writeable(file) != 0)
		return;
	__mnt_drop_write(mnt);
	file_release_write(file);
}

/* the real guts of fput() - releasing the last reference to file */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = dentry->d_inode;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_flock(file);

	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op && file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	ima_file_free(file);
	if (file->f_op && file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
		     !(file->f_mode & FMODE_PATH))) {
		cdev_put(inode->i_cdev);
	}
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_dec(inode);
	if (file->f_mode & FMODE_WRITE)
		drop_file_write_access(file);
	file->f_path.dentry = NULL;
	file->f_path.mnt = NULL;
	file_free(file);
	dput(dentry);
	mntput(mnt);
}

static DEFINE_SPINLOCK(delayed_fput_lock);
static LIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
	LIST_HEAD(head);
	spin_lock_irq(&delayed_fput_lock);
	list_splice_init(&delayed_fput_list, &head);
	spin_unlock_irq(&delayed_fput_lock);
	while (!list_empty(&head)) {
		struct file *f = list_first_entry(&head, struct file, f_u.fu_list);
		list_del_init(&f->f_u.fu_list);
		__fput(f);
	}
}

static void ____fput(struct callback_head *work)
{
	__fput(container_of(work, struct file, f_u.fu_rcuhead));
}

/*
 * If a kernel thread really needs the final fput() it has done to
 * complete, call this.  The only user right now is the boot code - we
 * *do* need to make sure our writes to binaries on initramfs have
 * not left us with opened struct files waiting for __fput() - execve()
 * won't work without that.  Please don't add more callers without
 * very good reasons; in particular, never call this with locks
 * held and never call it from a thread that might need to do
 * some work on any kind of umount.
 */
void flush_delayed_fput(void)
{
	delayed_fput(NULL);
}

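/*
 * Sketch of the intended boot-time pattern (illustrative; not a literal
 * copy of init/main.c): a kernel thread that has just written out the
 * initramfs binaries flushes its delayed fputs before exec'ing init:
 *
 *	... sys_open()/sys_write()/sys_close() on initramfs files ...
 *	flush_delayed_fput();
 *	run_init_process("/sbin/init");
 */
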
static DECLARE_WORK(delayed_fput_work, delayed_fput);

void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;
		file_sb_list_del(file);
		if (unlikely(in_interrupt() || task->flags & PF_KTHREAD)) {
			unsigned long flags;
			spin_lock_irqsave(&delayed_fput_lock, flags);
			list_add(&file->f_u.fu_list, &delayed_fput_list);
			schedule_work(&delayed_fput_work);
			spin_unlock_irqrestore(&delayed_fput_lock, flags);
			return;
		}
		init_task_work(&file->f_u.fu_rcuhead, ____fput);
		task_work_add(task, &file->f_u.fu_rcuhead, true);
	}
}

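/*
 * Reference-counting sketch (illustrative): code that resolves a file
 * descriptor takes a reference with fget() and drops it with fput().
 * If that fput() drops the last reference, __fput() is not run here but
 * queued via task_work and executed before the task returns to
 * userspace:
 *
 *	struct file *file = fget(fd);
 *	if (!file)
 *		return -EBADF;
 *	... use file ...
 *	fput(file);
 */
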
/*
 * Synchronous analog of fput(): for kernel threads that might be needed
 * in some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks), that need to wait for completion of __fput() and
 * know that for this specific struct file it won't involve anything
 * that would need them.  Use only if you really need it - at the very
 * least, don't blindly convert an fput() done by a kernel thread to this.
 */
void __fput_sync(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;
		file_sb_list_del(file);
		BUG_ON(!(task->flags & PF_KTHREAD));
		__fput(file);
	}
}

EXPORT_SYMBOL(fput);

void put_filp(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_sb_list_del(file);
		file_free(file);
	}
}

static inline int file_list_cpu(struct file *file)
{
#ifdef CONFIG_SMP
	return file->f_sb_list_cpu;
#else
	return smp_processor_id();
#endif
}

/* helper for file_sb_list_add to reduce ifdefs */
static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
{
	struct list_head *list;
#ifdef CONFIG_SMP
	int cpu;
	cpu = smp_processor_id();
	file->f_sb_list_cpu = cpu;
	list = per_cpu_ptr(sb->s_files, cpu);
#else
	list = &sb->s_files;
#endif
	list_add(&file->f_u.fu_list, list);
}

/**
 * file_sb_list_add - add a file to the sb's file list
 * @file: file to add
 * @sb: sb to add it to
 *
 * Use this function to associate a file with the superblock of the inode it
 * refers to.
 */
void file_sb_list_add(struct file *file, struct super_block *sb)
{
	lg_local_lock(&files_lglock);
	__file_sb_list_add(file, sb);
	lg_local_unlock(&files_lglock);
}

/**
 * file_sb_list_del - remove a file from its sb's file list
 * @file: file to remove
 *
 * Use this function to remove a file from the per-superblock file list
 * it was added to with file_sb_list_add().
 */
void file_sb_list_del(struct file *file)
{
	if (!list_empty(&file->f_u.fu_list)) {
		lg_local_lock_cpu(&files_lglock, file_list_cpu(file));
		list_del_init(&file->f_u.fu_list);
		lg_local_unlock_cpu(&files_lglock, file_list_cpu(file));
	}
}

#ifdef CONFIG_SMP

/*
 * These macros iterate all files on all CPUs for a given superblock.
 * files_lglock must be held globally.
 */
#define do_file_list_for_each_entry(__sb, __file)		\
{								\
	int i;							\
	for_each_possible_cpu(i) {				\
		struct list_head *list;				\
		list = per_cpu_ptr((__sb)->s_files, i);		\
		list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
	}							\
}

#else

#define do_file_list_for_each_entry(__sb, __file)		\
{								\
	struct list_head *list;					\
	list = &(__sb)->s_files;				\
	list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
}

#endif

/**
 *	mark_files_ro - mark all files read-only
 *	@sb: superblock in question
 *
 *	All files are marked read-only.  We don't care about files
 *	pending deletion, so this should be used in 'force' mode only.
 */
void mark_files_ro(struct super_block *sb)
{
	struct file *f;

	lg_global_lock(&files_lglock);
	do_file_list_for_each_entry(sb, f) {
		if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
			continue;
		if (!file_count(f))
			continue;
		if (!(f->f_mode & FMODE_WRITE))
			continue;
		spin_lock(&f->f_lock);
		f->f_mode &= ~FMODE_WRITE;
		spin_unlock(&f->f_lock);
		if (file_check_writeable(f) != 0)
			continue;
		file_release_write(f);
		mnt_drop_write_file(f);
	} while_file_list_for_each_entry;
	lg_global_unlock(&files_lglock);
}

void __init files_init(unsigned long mempages)
{
	unsigned long n;

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	/*
	 * One file with associated inode and dcache is very roughly 1K.
	 * Per default don't use more than 10% of our memory for files.
	 */

	n = (mempages * (PAGE_SIZE / 1024)) / 10;
	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
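	/*
	 * Worked example (illustrative): with 4 KiB pages and 1 GiB of
	 * RAM, mempages is about 262144, so
	 *
	 *	n = (262144 * (4096 / 1024)) / 10 = 104857
	 *
	 * i.e. roughly 100k files, matching the "~1K per file, 10% of
	 * memory" estimate above; NR_FILE (8192) only acts as a floor.
	 */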

	files_defer_init();
	lg_lock_init(&files_lglock, "files_lglock");
	percpu_counter_init(&nr_files, 0);
}
485