/*
 * linux/fs/file_table.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>

#include <asm/atomic.h>

/* sysctl tunables... */
struct files_stat_struct files_stat = {
        .max_files = NR_FILE
};

/* public. Not pretty! */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static inline void file_free_rcu(struct rcu_head *head)
{
        struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

        put_cred(f->f_cred);
        kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
        percpu_counter_dec(&nr_files);
        file_check_state(f);
        call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static int get_nr_files(void)
{
        return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
int get_max_files(void)
{
        return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write, struct file *filp,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        files_stat.nr_files = get_nr_files();
        return proc_dointvec(table, write, filp, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write, struct file *filp,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return -ENOSYS;
}
#endif

/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will create an imbalance in the mount's writer
 * count and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
        const struct cred *cred = current_cred();
        static int old_max;
        struct file *f;

        /*
         * Privileged users can go above max_files
         */
        if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
                /*
                 * percpu_counters are inaccurate.  Do an expensive check before
                 * we go and fail.
                 */
                if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
                        goto over;
        }

        f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
        if (f == NULL)
                goto fail;

        percpu_counter_inc(&nr_files);
        if (security_file_alloc(f))
                goto fail_sec;

        INIT_LIST_HEAD(&f->f_u.fu_list);
        atomic_long_set(&f->f_count, 1);
        rwlock_init(&f->f_owner.lock);
        f->f_cred = get_cred(cred);
        eventpoll_init_file(f);
        /* f->f_version: 0 */
        return f;

over:
        /* Ran out of filps - report that */
        if (get_nr_files() > old_max) {
                printk(KERN_INFO "VFS: file-max limit %d reached\n",
                       get_max_files());
                old_max = get_nr_files();
        }
        goto fail;

fail_sec:
        file_free(f);
fail:
        return NULL;
}

EXPORT_SYMBOL(get_empty_filp);

/**
 * alloc_file - allocate and initialize a 'struct file'
 * @mnt: the vfsmount on which the file will reside
 * @dentry: the dentry representing the new file
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file'.  Do so because of the same initialization
 * pitfalls listed for init_file().  This is a
 * preferred interface to using init_file().
 *
 * If all the callers of init_file() are eliminated, its
 * code should be moved into this function.
 */
struct file *alloc_file(struct vfsmount *mnt, struct dentry *dentry,
                fmode_t mode, const struct file_operations *fop)
{
        struct file *file;

        file = get_empty_filp();
        if (!file)
                return NULL;

        init_file(file, mnt, dentry, mode, fop);
        return file;
}
EXPORT_SYMBOL(alloc_file);

/**
 * init_file - initialize a 'struct file'
 * @file: the already allocated 'struct file' to be initialized
 * @mnt: the vfsmount on which the file resides
 * @dentry: the dentry representing this file
 * @mode: the mode the file is opened with
 * @fop: the 'struct file_operations' for this file
 *
 * Use this instead of setting the members directly.  Doing so
 * avoids making mistakes like forgetting the mntget() or
 * forgetting to take a write on the mnt.
 *
 * Note: This is a crappy interface.  It is here to make
 * merging with the existing users of get_empty_filp()
 * who have complex failure logic easier.  All users
 * of this should be moving to alloc_file().
 */
int init_file(struct file *file, struct vfsmount *mnt, struct dentry *dentry,
              fmode_t mode, const struct file_operations *fop)
{
        int error = 0;
        file->f_path.dentry = dentry;
        file->f_path.mnt = mntget(mnt);
        file->f_mapping = dentry->d_inode->i_mapping;
        file->f_mode = mode;
        file->f_op = fop;

        /*
         * These mounts don't really matter in practice
         * for r/o bind mounts.  They aren't userspace-
         * visible.  We do this for consistency, and so
         * that we can do debugging checks at __fput()
         */
        if ((mode & FMODE_WRITE) && !special_file(dentry->d_inode->i_mode)) {
                file_take_write(file);
                error = mnt_want_write(mnt);
                WARN_ON(error);
        }
        return error;
}
EXPORT_SYMBOL(init_file);
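
/*
 * Illustrative sketch, not part of the original file: one plausible way a
 * caller could build a read-only struct file with alloc_file().  The
 * example_mnt/example_dentry/example_fops names are hypothetical.  The
 * pattern assumes the caller hands the file a dentry reference (dropped by
 * dput() in __fput()), while init_file() takes its own mount reference
 * via mntget().
 */
#if 0
static struct file *example_open_readonly(struct vfsmount *example_mnt,
                                          struct dentry *example_dentry,
                                          const struct file_operations *example_fops)
{
        struct file *file;

        file = alloc_file(example_mnt, dget(example_dentry),
                          FMODE_READ, example_fops);
        if (!file) {
                dput(example_dentry);   /* balance the dget() on failure */
                return NULL;
        }
        return file;
}
#endif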

void fput(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count))
                __fput(file);
}

EXPORT_SYMBOL(fput);

/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
void drop_file_write_access(struct file *file)
{
        struct vfsmount *mnt = file->f_path.mnt;
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;

        put_write_access(inode);

        if (special_file(inode->i_mode))
                return;
        if (file_check_writeable(file) != 0)
                return;
        mnt_drop_write(mnt);
        file_release_write(file);
}
EXPORT_SYMBOL_GPL(drop_file_write_access);

/* __fput is called from task context when aio completion releases the
 * last use of a struct file *.  Do not use otherwise.
 */
void __fput(struct file *file)
{
        struct dentry *dentry = file->f_path.dentry;
        struct vfsmount *mnt = file->f_path.mnt;
        struct inode *inode = dentry->d_inode;

        might_sleep();

        fsnotify_close(file);
        /*
         * The function eventpoll_release() should be the first called
         * in the file cleanup chain.
         */
        eventpoll_release(file);
        locks_remove_flock(file);

        if (unlikely(file->f_flags & FASYNC)) {
                if (file->f_op && file->f_op->fasync)
                        file->f_op->fasync(-1, file, 0);
        }
        if (file->f_op && file->f_op->release)
                file->f_op->release(inode, file);
        security_file_free(file);
        if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
                cdev_put(inode->i_cdev);
        fops_put(file->f_op);
        put_pid(file->f_owner.pid);
        file_kill(file);
        if (file->f_mode & FMODE_WRITE)
                drop_file_write_access(file);
        file->f_path.dentry = NULL;
        file->f_path.mnt = NULL;
        file_free(file);
        dput(dentry);
        mntput(mnt);
}

struct file *fget(unsigned int fd)
{
        struct file *file;
        struct files_struct *files = current->files;

        rcu_read_lock();
        file = fcheck_files(files, fd);
        if (file) {
                if (!atomic_long_inc_not_zero(&file->f_count)) {
                        /* File object ref couldn't be taken */
                        rcu_read_unlock();
                        return NULL;
                }
        }
        rcu_read_unlock();

        return file;
}

EXPORT_SYMBOL(fget);
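
/*
 * Illustrative sketch, not part of the original file: the usual calling
 * pattern for code handed a file descriptor.  fget() returns NULL for a
 * bad or already-closed fd; every successful fget() must be balanced by
 * an fput().  The function name is hypothetical.
 */
#if 0
static int example_use_fd(unsigned int fd)
{
        struct file *file = fget(fd);

        if (!file)
                return -EBADF;

        /* ... use file->f_op, file->f_path, etc. here ... */

        fput(file);
        return 0;
}
#endif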

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 * You can use this only if it is guaranteed that the current task already
 * holds a refcnt to that file.  That check has to be done at fget() only
 * and a flag is returned to be passed to the corresponding fput_light().
 * There must not be a cloning between an fget_light/fput_light pair.
 */
struct file *fget_light(unsigned int fd, int *fput_needed)
{
        struct file *file;
        struct files_struct *files = current->files;

        *fput_needed = 0;
        if (likely((atomic_read(&files->count) == 1))) {
                file = fcheck_files(files, fd);
        } else {
                rcu_read_lock();
                file = fcheck_files(files, fd);
                if (file) {
                        if (atomic_long_inc_not_zero(&file->f_count))
                                *fput_needed = 1;
                        else
                                /* Didn't get the reference, someone's freed */
                                file = NULL;
                }
                rcu_read_unlock();
        }

        return file;
}

void put_filp(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                security_file_free(file);
                file_kill(file);
                file_free(file);
        }
}

void file_move(struct file *file, struct list_head *list)
{
        if (!list)
                return;
        file_list_lock();
        list_move(&file->f_u.fu_list, list);
        file_list_unlock();
}

void file_kill(struct file *file)
{
        if (!list_empty(&file->f_u.fu_list)) {
                file_list_lock();
                list_del_init(&file->f_u.fu_list);
                file_list_unlock();
        }
}

int fs_may_remount_ro(struct super_block *sb)
{
        struct file *file;

        /* Check that no files are currently opened for writing. */
        file_list_lock();
        list_for_each_entry(file, &sb->s_files, f_u.fu_list) {
                struct inode *inode = file->f_path.dentry->d_inode;

                /* File with pending delete? */
                if (inode->i_nlink == 0)
                        goto too_bad;

                /* Writeable file? */
                if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
                        goto too_bad;
        }
        file_list_unlock();
        return 1; /* Tis' cool bro. */
too_bad:
        file_list_unlock();
        return 0;
}

void __init files_init(unsigned long mempages)
{
        int n;

        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

        /*
         * One file with associated inode and dcache is very roughly 1K.
         * Per default don't use more than 10% of our memory for files.
         */

        n = (mempages * (PAGE_SIZE / 1024)) / 10;
        files_stat.max_files = n;
        if (files_stat.max_files < NR_FILE)
                files_stat.max_files = NR_FILE;
        files_defer_init();
        percpu_counter_init(&nr_files, 0);
}
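
/*
 * Illustrative sketch, not part of the original file: how fget_light() is
 * intended to pair with fput_light() (a static inline in <linux/file.h>)
 * using the fput_needed flag it sets.  The file may only be used by the
 * current task between the two calls, as the comment above fget_light()
 * requires.  The function name is hypothetical.
 */
#if 0
static int example_use_fd_light(unsigned int fd)
{
        int fput_needed;
        struct file *file = fget_light(fd, &fput_needed);

        if (!file)
                return -EBADF;

        /* ... short, current-task-only use of the file ... */

        fput_light(file, fput_needed);
        return 0;
}
#endif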