/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/task_work.h>
#include <linux/ima.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

DEFINE_STATIC_LGLOCK(files_lglock);

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static void file_free_rcu(struct rcu_head *head)
{
	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

	put_cred(f->f_cred);
	kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
	percpu_counter_dec(&nr_files);
	file_check_state(f);
	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}
#endif
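
/*
 * A note on the nr_files accounting used by get_empty_filp() below:
 * percpu_counter_read_positive() is cheap but can be off by roughly the
 * counter batch size times the number of CPUs, so it serves only as a
 * filter; the exact (and expensive) percpu_counter_sum_positive() is
 * consulted before actually failing.  A hypothetical helper following
 * the same two-step pattern (not part of this file) might look like:
 *
 *	static bool nr_files_over_limit(void)
 *	{
 *		if (get_nr_files() < files_stat.max_files)
 *			return false;			// fast, approximate
 *		return percpu_counter_sum_positive(&nr_files) >=
 *		       files_stat.max_files;		// slow, exact
 *	}
 */
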
/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * limit on the number of file structures, ran out of memory, or the
 * operation is not permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file *f;
	int error;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (unlikely(!f))
		return ERR_PTR(-ENOMEM);

	percpu_counter_inc(&nr_files);
	f->f_cred = get_cred(cred);
	error = security_file_alloc(f);
	if (unlikely(error)) {
		file_free(f);
		return ERR_PTR(error);
	}

	INIT_LIST_HEAD(&f->f_u.fu_list);
	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	return ERR_PTR(-ENFILE);
}

/**
 * alloc_file - allocate and initialize a 'struct file'
 * @path: the (dentry, vfsmount) pair the new file will reside on
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file'.  Do so because of the same initialization
 * pitfalls listed for init_file().  This is the
 * preferred interface to init_file().
 *
 * If all the callers of init_file() are eliminated, its
 * code should be moved into this function.
 */
struct file *alloc_file(struct path *path, fmode_t mode,
		const struct file_operations *fop)
{
	struct file *file;

	file = get_empty_filp();
	if (IS_ERR(file))
		return file;

	file->f_path = *path;
	file->f_inode = path->dentry->d_inode;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	file->f_mode = mode;
	file->f_op = fop;

	/*
	 * These mounts don't really matter in practice
	 * for r/o bind mounts.  They aren't userspace-
	 * visible.  We do this for consistency, and so
	 * that we can do debugging checks at __fput()
	 */
	if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
		file_take_write(file);
		WARN_ON(mnt_clone_write(path->mnt));
	}
	if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);
	return file;
}
EXPORT_SYMBOL(alloc_file);

/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
static void drop_file_write_access(struct file *file)
{
	struct vfsmount *mnt = file->f_path.mnt;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	put_write_access(inode);

	if (special_file(inode->i_mode))
		return;
	if (file_check_writeable(file) != 0)
		return;
	__mnt_drop_write(mnt);
	file_release_write(file);
}
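
/*
 * For orientation: drop_file_write_access() above undoes the pairing
 * that a typical open-for-write path establishes.  A simplified sketch,
 * with error handling abbreviated (mnt_want_write() is the usual
 * counterpart of __mnt_drop_write() on the open side):
 *
 *	error = get_write_access(inode);	// raise inode->i_writecount
 *	if (error)
 *		return error;
 *	error = mnt_want_write(mnt);		// pin the mount writable
 *	if (error) {
 *		put_write_access(inode);
 *		return error;
 *	}
 *	file_take_write(file);			// debug state checked at __fput()
 */
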
/* the real guts of fput() - releasing the last reference to a file
 */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = file->f_inode;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_flock(file);

	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op && file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	ima_file_free(file);
	if (file->f_op && file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
		     !(file->f_mode & FMODE_PATH))) {
		cdev_put(inode->i_cdev);
	}
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_dec(inode);
	if (file->f_mode & FMODE_WRITE)
		drop_file_write_access(file);
	file->f_path.dentry = NULL;
	file->f_path.mnt = NULL;
	file->f_inode = NULL;
	file_free(file);
	dput(dentry);
	mntput(mnt);
}

static LLIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_fput_list);
	struct llist_node *next;

	for (; node; node = next) {
		next = llist_next(node);
		__fput(llist_entry(node, struct file, f_u.fu_llist));
	}
}

static void ____fput(struct callback_head *work)
{
	__fput(container_of(work, struct file, f_u.fu_rcuhead));
}

/*
 * If a kernel thread really needs the final fput() it has done to
 * complete, it should call this.  The only user right now is boot - we
 * *do* need to make sure our writes to binaries on initramfs have
 * not left us with an opened struct file waiting for __fput() - execve()
 * won't work without that.  Please don't add more callers without
 * very good reasons; in particular, never call this with locks
 * held and never call it from a thread that might need to do
 * some work on any kind of umount.
 */
void flush_delayed_fput(void)
{
	delayed_fput(NULL);
}

static DECLARE_WORK(delayed_fput_work, delayed_fput);

void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;

		file_sb_list_del(file);
		if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
			init_task_work(&file->f_u.fu_rcuhead, ____fput);
			if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
				return;
			/*
			 * After this task has run exit_task_work(),
			 * task_work_add() will fail.  free_ipc_ns()->
			 * shm_destroy() can do this.  Fall through to delayed
			 * fput to avoid leaking *file.
			 */
		}

		if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
			schedule_work(&delayed_fput_work);
	}
}
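
/*
 * The usual pattern balanced by fput() is fget() in a syscall path.  A
 * hypothetical example (do_something() stands in for real work):
 *
 *	struct file *f = fget(fd);
 *	if (!f)
 *		return -EBADF;
 *	ret = do_something(f);
 *	fput(f);	// on the last reference, __fput() runs via task_work
 *	return ret;
 *
 * Since fput() may defer __fput(), the file can briefly outlive the
 * call; __fput_sync() below exists for callers that cannot tolerate
 * that.
 */
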
/*
 * Synchronous analog of fput(), for kernel threads that might be needed
 * by some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks), need to wait for completion of __fput(), and know
 * that for this specific struct file it won't involve anything that
 * would need them.  Use it only if you really need it - at the very
 * least, don't blindly convert an fput() done by a kernel thread to this.
 */
void __fput_sync(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;
		file_sb_list_del(file);
		BUG_ON(!(task->flags & PF_KTHREAD));
		__fput(file);
	}
}

EXPORT_SYMBOL(fput);

void put_filp(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_sb_list_del(file);
		file_free(file);
	}
}

static inline int file_list_cpu(struct file *file)
{
#ifdef CONFIG_SMP
	return file->f_sb_list_cpu;
#else
	return smp_processor_id();
#endif
}

/* helper for file_sb_list_add to reduce ifdefs */
static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
{
	struct list_head *list;
#ifdef CONFIG_SMP
	int cpu;
	cpu = smp_processor_id();
	file->f_sb_list_cpu = cpu;
	list = per_cpu_ptr(sb->s_files, cpu);
#else
	list = &sb->s_files;
#endif
	list_add(&file->f_u.fu_list, list);
}

/**
 * file_sb_list_add - add a file to the sb's file list
 * @file: file to add
 * @sb: sb to add it to
 *
 * Use this function to associate a file with the superblock of the inode it
 * refers to.
 */
void file_sb_list_add(struct file *file, struct super_block *sb)
{
	lg_local_lock(&files_lglock);
	__file_sb_list_add(file, sb);
	lg_local_unlock(&files_lglock);
}

/**
 * file_sb_list_del - remove a file from the sb's file list
 * @file: file to remove
 *
 * Use this function to remove a file from its superblock.
 */
void file_sb_list_del(struct file *file)
{
	if (!list_empty(&file->f_u.fu_list)) {
		lg_local_lock_cpu(&files_lglock, file_list_cpu(file));
		list_del_init(&file->f_u.fu_list);
		lg_local_unlock_cpu(&files_lglock, file_list_cpu(file));
	}
}

#ifdef CONFIG_SMP

/*
 * These macros iterate all files on all CPUs for a given superblock.
 * files_lglock must be held globally.
 */
#define do_file_list_for_each_entry(__sb, __file)		\
{								\
	int i;							\
	for_each_possible_cpu(i) {				\
		struct list_head *list;				\
		list = per_cpu_ptr((__sb)->s_files, i);		\
		list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
	}							\
}

#else

#define do_file_list_for_each_entry(__sb, __file)		\
{								\
	struct list_head *list;					\
	list = &(__sb)->s_files;				\
	list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
}

#endif
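
/*
 * Illustration of the iteration macros above, using a hypothetical
 * writer count (mark_files_ro() below is the one real user here):
 *
 *	long nr_writers = 0;
 *	struct file *f;
 *
 *	lg_global_lock(&files_lglock);
 *	do_file_list_for_each_entry(sb, f) {
 *		if (f->f_mode & FMODE_WRITE)
 *			nr_writers++;
 *	} while_file_list_for_each_entry;
 *	lg_global_unlock(&files_lglock);
 */
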
/**
 * mark_files_ro - mark all files read-only
 * @sb: superblock in question
 *
 * All files are marked read-only.  We don't care about files with
 * pending deletes, so this should be used in 'force' mode only.
 */
void mark_files_ro(struct super_block *sb)
{
	struct file *f;

	lg_global_lock(&files_lglock);
	do_file_list_for_each_entry(sb, f) {
		if (!S_ISREG(file_inode(f)->i_mode))
			continue;
		if (!file_count(f))
			continue;
		if (!(f->f_mode & FMODE_WRITE))
			continue;
		spin_lock(&f->f_lock);
		f->f_mode &= ~FMODE_WRITE;
		spin_unlock(&f->f_lock);
		if (file_check_writeable(f) != 0)
			continue;
		__mnt_drop_write(f->f_path.mnt);
		file_release_write(f);
	} while_file_list_for_each_entry;
	lg_global_unlock(&files_lglock);
}

void __init files_init(unsigned long mempages)
{
	unsigned long n;

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	/*
	 * One file with associated inode and dcache is very roughly 1K.
	 * By default don't use more than 10% of our memory for files.
	 */

	n = (mempages * (PAGE_SIZE / 1024)) / 10;
	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
	files_defer_init();
	lg_lock_init(&files_lglock, "files_lglock");
	percpu_counter_init(&nr_files, 0);
}
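
/*
 * Worked example of the max_files heuristic in files_init(), assuming
 * 4 KiB pages and 4 GiB of RAM (so mempages == 1048576):
 *
 *	n = (1048576 * (4096 / 1024)) / 10 == 419430
 *
 * i.e. about 420k files at roughly 1K apiece (file plus inode and
 * dcache entry), which is the intended ~10% of memory.
 */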