xref: /linux/fs/file_table.c (revision 20d0021394c1b070bf04b22c5bc8fdb437edd4c5)
/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

EXPORT_SYMBOL(files_stat); /* Needed by unix.o */

/* public. Not pretty! */
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);

static DEFINE_SPINLOCK(filp_count_lock);

/* slab constructors and destructors are called from arbitrary
 * context and must be fully threaded - use a local spinlock
 * to protect files_stat.nr_files
 */
void filp_ctor(void * objp, struct kmem_cache_s *cachep, unsigned long cflags)
{
	if ((cflags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		unsigned long flags;
		spin_lock_irqsave(&filp_count_lock, flags);
		files_stat.nr_files++;
		spin_unlock_irqrestore(&filp_count_lock, flags);
	}
}

void filp_dtor(void * objp, struct kmem_cache_s *cachep, unsigned long dflags)
{
	unsigned long flags;
	spin_lock_irqsave(&filp_count_lock, flags);
	files_stat.nr_files--;
	spin_unlock_irqrestore(&filp_count_lock, flags);
}

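/*
 * Editorial sketch, not part of the original file: filp_ctor() and
 * filp_dtor() only take effect once they are passed to the slab cache
 * backing struct file.  In this era the "filp" cache is created
 * elsewhere (vfs_caches_init() in fs/dcache.c), roughly along the
 * lines below; the wrapper name is hypothetical and the call is shown
 * only to make the ctor/dtor hookup explicit.
 */
#if 0	/* illustrative only */
static void example_create_filp_cache(void)
{
	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC,
			filp_ctor, filp_dtor);
}
#endif
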
static inline void file_free(struct file *f)
{
	kmem_cache_free(filp_cachep, f);
}

/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 */
struct file *get_empty_filp(void)
{
	static int old_max;
	struct file * f;

	/*
	 * Privileged users can go above max_files
	 */
	if (files_stat.nr_files >= files_stat.max_files &&
				!capable(CAP_SYS_ADMIN))
		goto over;

	f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
	if (f == NULL)
		goto fail;

	memset(f, 0, sizeof(*f));
	if (security_file_alloc(f))
		goto fail_sec;

	eventpoll_init_file(f);
	atomic_set(&f->f_count, 1);
	f->f_uid = current->fsuid;
	f->f_gid = current->fsgid;
	rwlock_init(&f->f_owner.lock);
	/* f->f_version: 0 */
	INIT_LIST_HEAD(&f->f_list);
	f->f_maxcount = INT_MAX;
	return f;

over:
	/* Ran out of filps - report that */
	if (files_stat.nr_files > old_max) {
		printk(KERN_INFO "VFS: file-max limit %d reached\n",
					files_stat.max_files);
		old_max = files_stat.nr_files;
	}
	goto fail;

fail_sec:
	file_free(f);
fail:
	return NULL;
}

EXPORT_SYMBOL(get_empty_filp);

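/*
 * Editorial sketch, not part of the original file: the usual caller
 * pattern for get_empty_filp() (e.g. in dentry_open()) is to allocate
 * the filp first and, if setup fails before a dentry/vfsmount has been
 * attached, release it with put_filp() rather than fput().  The
 * function and its setup_ok parameter are hypothetical.
 */
#if 0	/* illustrative only */
static struct file *example_alloc_filp(int setup_ok)
{
	struct file *f = get_empty_filp();

	if (f == NULL)
		return ERR_PTR(-ENFILE);
	if (!setup_ok) {
		/* nothing attached yet, so put_filp() is the right release */
		put_filp(f);
		return ERR_PTR(-EINVAL);
	}
	return f;
}
#endif
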
void fastcall fput(struct file *file)
{
	if (atomic_dec_and_test(&file->f_count))
		__fput(file);
}

EXPORT_SYMBOL(fput);

/* __fput is called from task context when aio completion releases the
 * last use of a struct file *.  Do not use otherwise.
 */
void fastcall __fput(struct file *file)
{
	struct dentry *dentry = file->f_dentry;
	struct vfsmount *mnt = file->f_vfsmnt;
	struct inode *inode = dentry->d_inode;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_flock(file);

	if (file->f_op && file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	if (unlikely(inode->i_cdev != NULL))
		cdev_put(inode->i_cdev);
	fops_put(file->f_op);
	if (file->f_mode & FMODE_WRITE)
		put_write_access(inode);
	file_kill(file);
	file->f_dentry = NULL;
	file->f_vfsmnt = NULL;
	file_free(file);
	dput(dentry);
	mntput(mnt);
}

struct file fastcall *fget(unsigned int fd)
{
	struct file *file;
	struct files_struct *files = current->files;

	spin_lock(&files->file_lock);
	file = fcheck_files(files, fd);
	if (file)
		get_file(file);
	spin_unlock(&files->file_lock);
	return file;
}

EXPORT_SYMBOL(fget);

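/*
 * Editorial sketch, not part of the original file: a typical
 * fget()/fput() pairing as seen from a system-call-style caller.  The
 * function name and read-style body are hypothetical; the point is
 * that every successful fget() is balanced by exactly one fput() once
 * the caller is done with the struct file.
 */
#if 0	/* illustrative only */
static ssize_t example_read_fd(unsigned int fd, char __user *buf,
			       size_t count, loff_t *pos)
{
	struct file *file = fget(fd);
	ssize_t ret = -EBADF;

	if (!file)
		return ret;
	if (file->f_op && file->f_op->read)
		ret = file->f_op->read(file, buf, count, pos);
	fput(file);		/* drop the reference taken by fget() */
	return ret;
}
#endif
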
/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 * You can use this only if it is guaranteed that the current task already
 * holds a refcnt to that file. That check has to be done at fget() only
 * and a flag is returned to be passed to the corresponding fput_light().
 * There must not be a cloning between an fget_light/fput_light pair.
 */
struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
{
	struct file *file;
	struct files_struct *files = current->files;

	*fput_needed = 0;
	if (likely((atomic_read(&files->count) == 1))) {
		file = fcheck_files(files, fd);
	} else {
		spin_lock(&files->file_lock);
		file = fcheck_files(files, fd);
		if (file) {
			get_file(file);
			*fput_needed = 1;
		}
		spin_unlock(&files->file_lock);
	}
	return file;
}

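/*
 * Editorial sketch, not part of the original file: callers pair
 * fget_light() with fput_light(), handing back the fput_needed flag so
 * the reference is only dropped when one was actually taken (i.e. when
 * the fd table was shared).  fput_light() lives in <linux/file.h> in
 * this era; the function below is hypothetical.
 */
#if 0	/* illustrative only */
static int example_peek_fd(unsigned int fd)
{
	int fput_needed;
	struct file *file = fget_light(fd, &fput_needed);
	int ret = -EBADF;

	if (file) {
		ret = 0;	/* ... use file here ... */
		fput_light(file, fput_needed);	/* no-op unless a ref was taken */
	}
	return ret;
}
#endif
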
void put_filp(struct file *file)
{
	if (atomic_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_kill(file);
		file_free(file);
	}
}

void file_move(struct file *file, struct list_head *list)
{
	if (!list)
		return;
	file_list_lock();
	list_move(&file->f_list, list);
	file_list_unlock();
}

void file_kill(struct file *file)
{
	if (!list_empty(&file->f_list)) {
		file_list_lock();
		list_del_init(&file->f_list);
		file_list_unlock();
	}
}

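/*
 * Editorial sketch, not part of the original file: file_move() is how
 * an opened file lands on its superblock's s_files list (as
 * dentry_open() does in this era), and file_kill() removes it again in
 * __fput()/put_filp().  A hypothetical open-path fragment:
 */
#if 0	/* illustrative only */
static void example_attach_to_sb(struct file *f, struct inode *inode)
{
	/* make the file visible to per-sb walkers like fs_may_remount_ro() */
	file_move(f, &inode->i_sb->s_files);
}
#endif
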
int fs_may_remount_ro(struct super_block *sb)
{
	struct list_head *p;

	/* Check that no files are currently opened for writing. */
	file_list_lock();
	list_for_each(p, &sb->s_files) {
		struct file *file = list_entry(p, struct file, f_list);
		struct inode *inode = file->f_dentry->d_inode;

		/* File with pending delete? */
		if (inode->i_nlink == 0)
			goto too_bad;

		/* Writeable file? */
		if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
			goto too_bad;
	}
	file_list_unlock();
	return 1; /* Tis' cool bro. */
too_bad:
	file_list_unlock();
	return 0;
}

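/*
 * Editorial sketch, not part of the original file: the remount path
 * (do_remount_sb()) consults fs_may_remount_ro() before switching a
 * superblock read-only, refusing the transition while files are still
 * open for writing or pending deletion.  The helper below is a
 * hypothetical condensation of that check.
 */
#if 0	/* illustrative only */
static int example_remount_ro_check(struct super_block *sb, int flags)
{
	if ((flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY) &&
	    !fs_may_remount_ro(sb))
		return -EBUSY;	/* writers still present */
	return 0;
}
#endif
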
void __init files_init(unsigned long mempages)
{
	int n;
	/* One file with associated inode and dcache is very roughly 1K.
	 * By default don't use more than 10% of our memory for files.
	 */

	n = (mempages * (PAGE_SIZE / 1024)) / 10;
	files_stat.max_files = n;
	if (files_stat.max_files < NR_FILE)
		files_stat.max_files = NR_FILE;
}
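/*
 * Editorial note, not part of the original file: as a worked example of
 * the sizing above, a machine with 131072 pages of 4K (512MB of RAM)
 * gets n = (131072 * (4096 / 1024)) / 10 = 52428, so max_files becomes
 * 52428, comfortably above the NR_FILE floor.
 */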