// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/file_table.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/task_work.h>
#include <linux/ima.h>
#include <linux/swap.h>
#include <linux/kmemleak.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
static struct files_stat_struct files_stat = {
        .max_files = NR_FILE
};

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __ro_after_init;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

/* Container for backing file with optional user path */
struct backing_file {
        struct file file;
        struct path user_path;
};

static inline struct backing_file *backing_file(struct file *f)
{
        return container_of(f, struct backing_file, file);
}

struct path *backing_file_user_path(struct file *f)
{
        return &backing_file(f)->user_path;
}
EXPORT_SYMBOL_GPL(backing_file_user_path);

static inline void file_free(struct file *f)
{
        security_file_free(f);
        if (likely(!(f->f_mode & FMODE_NOACCOUNT)))
                percpu_counter_dec(&nr_files);
        put_cred(f->f_cred);
        if (unlikely(f->f_mode & FMODE_BACKING)) {
                path_put(backing_file_user_path(f));
                kfree(backing_file(f));
        } else {
                kmem_cache_free(filp_cachep, f);
        }
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
        return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
        return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Handle nr_files sysctl
 */
static int proc_nr_files(struct ctl_table *table, int write, void *buffer,
                         size_t *lenp, loff_t *ppos)
{
        files_stat.nr_files = get_nr_files();
        return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}

static struct ctl_table fs_stat_sysctls[] = {
        {
                .procname = "file-nr",
                .data = &files_stat,
                .maxlen = sizeof(files_stat),
                .mode = 0444,
                .proc_handler = proc_nr_files,
        },
        {
                .procname = "file-max",
                .data = &files_stat.max_files,
                .maxlen = sizeof(files_stat.max_files),
                .mode = 0644,
                .proc_handler = proc_doulongvec_minmax,
                .extra1 = SYSCTL_LONG_ZERO,
                .extra2 = SYSCTL_LONG_MAX,
        },
        {
                .procname = "nr_open",
                .data = &sysctl_nr_open,
                .maxlen = sizeof(unsigned int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &sysctl_nr_open_min,
                .extra2 = &sysctl_nr_open_max,
        },
        { }
};

static int __init init_fs_stat_sysctls(void)
{
        register_sysctl_init("fs", fs_stat_sysctls);
        if (IS_ENABLED(CONFIG_BINFMT_MISC)) {
                struct ctl_table_header *hdr;

                hdr = register_sysctl_mount_point("fs/binfmt_misc");
                kmemleak_not_leak(hdr);
        }
        return 0;
}
fs_initcall(init_fs_stat_sysctls);
#endif

static int init_file(struct file *f, int flags, const struct cred *cred)
{
        int error;

        f->f_cred = get_cred(cred);
        error = security_file_alloc(f);
        if (unlikely(error)) {
                put_cred(f->f_cred);
                return error;
        }

        rwlock_init(&f->f_owner.lock);
        spin_lock_init(&f->f_lock);
        mutex_init(&f->f_pos_lock);
        f->f_flags = flags;
        f->f_mode = OPEN_FMODE(flags);
        /* f->f_version: 0 */

        /*
         * We're SLAB_TYPESAFE_BY_RCU so initialize f_count last. While
         * fget-rcu pattern users need to be able to handle spurious
         * refcount bumps we should reinitialize the reused file first.
         */
        atomic_long_set(&f->f_count, 1);
        return 0;
}
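
/*
 * Illustrative sketch (not part of this file): the ordering above matters
 * because lockless lookups take a speculative reference under RCU and must
 * tolerate hitting a recycled file object. Heavily simplified from the
 * __fget_files() pattern; "fdt" and "fd" are just placeholders here, and the
 * real lookup additionally re-checks the fd table entry after taking the
 * reference:
 *
 *        rcu_read_lock();
 *        file = rcu_dereference(fdt->fd[fd]);
 *        if (file && !atomic_long_inc_not_zero(&file->f_count))
 *                file = NULL;        // raced with the final fput() or slab reuse
 *        rcu_read_unlock();
 */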

/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we're over the file
 * structures limit, ran out of memory or the operation is not permitted.
 *
 * Be very careful using this. You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write. If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *alloc_empty_file(int flags, const struct cred *cred)
{
        static long old_max;
        struct file *f;
        int error;

        /*
         * Privileged users can go above max_files
         */
        if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
                /*
                 * percpu_counters are inaccurate. Do an expensive check before
                 * we go and fail.
                 */
                if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
                        goto over;
        }

        f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
        if (unlikely(!f))
                return ERR_PTR(-ENOMEM);

        error = init_file(f, flags, cred);
        if (unlikely(error)) {
                kmem_cache_free(filp_cachep, f);
                return ERR_PTR(error);
        }

        percpu_counter_inc(&nr_files);

        return f;

over:
        /* Ran out of filps - report that */
        if (get_nr_files() > old_max) {
                pr_info("VFS: file-max limit %lu reached\n", get_max_files());
                old_max = get_nr_files();
        }
        return ERR_PTR(-ENFILE);
}
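
/*
 * Illustrative sketch (not part of this file): the "write access" the comment
 * above refers to is the mount writer count, which a hypothetical caller
 * opening for write would balance with the mnt_want_write()/mnt_drop_write()
 * pair, roughly like this (error handling omitted):
 *
 *        if (mnt_want_write(path->mnt))        // bump the mount's writer count
 *                return -EROFS;
 *        file = alloc_empty_file(O_WRONLY, current_cred());
 *        ...                                   // set up and use the file
 *        mnt_drop_write(path->mnt);            // rebalance the writer count
 */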

/*
 * Variant of alloc_empty_file() that doesn't check and modify nr_files.
 *
 * This is only for kernel internal use, and the allocated file must not be
 * installed into file tables or such.
 */
struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred)
{
        struct file *f;
        int error;

        f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
        if (unlikely(!f))
                return ERR_PTR(-ENOMEM);

        error = init_file(f, flags, cred);
        if (unlikely(error)) {
                kmem_cache_free(filp_cachep, f);
                return ERR_PTR(error);
        }

        f->f_mode |= FMODE_NOACCOUNT;

        return f;
}

/*
 * Variant of alloc_empty_file() that allocates a backing_file container
 * and doesn't check and modify nr_files.
 *
 * This is only for kernel internal use, and the allocated file must not be
 * installed into file tables or such.
 */
struct file *alloc_empty_backing_file(int flags, const struct cred *cred)
{
        struct backing_file *ff;
        int error;

        ff = kzalloc(sizeof(struct backing_file), GFP_KERNEL);
        if (unlikely(!ff))
                return ERR_PTR(-ENOMEM);

        error = init_file(&ff->file, flags, cred);
        if (unlikely(error)) {
                kfree(ff);
                return ERR_PTR(error);
        }

        ff->file.f_mode |= FMODE_BACKING | FMODE_NOACCOUNT;
        return &ff->file;
}

/**
 * alloc_file - allocate and initialize a 'struct file'
 *
 * @path: the (dentry, vfsmount) pair for the new file
 * @flags: O_... flags with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 */
static struct file *alloc_file(const struct path *path, int flags,
                const struct file_operations *fop)
{
        struct file *file;

        file = alloc_empty_file(flags, current_cred());
        if (IS_ERR(file))
                return file;

        file->f_path = *path;
        file->f_inode = path->dentry->d_inode;
        file->f_mapping = path->dentry->d_inode->i_mapping;
        file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
        file->f_sb_err = file_sample_sb_err(file);
        if (fop->llseek)
                file->f_mode |= FMODE_LSEEK;
        if ((file->f_mode & FMODE_READ) &&
             likely(fop->read || fop->read_iter))
                file->f_mode |= FMODE_CAN_READ;
        if ((file->f_mode & FMODE_WRITE) &&
             likely(fop->write || fop->write_iter))
                file->f_mode |= FMODE_CAN_WRITE;
        file->f_iocb_flags = iocb_flags(file);
        file->f_mode |= FMODE_OPENED;
        file->f_op = fop;
        if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
                i_readcount_inc(path->dentry->d_inode);
        return file;
}

struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
                               const char *name, int flags,
                               const struct file_operations *fops)
{
        static const struct dentry_operations anon_ops = {
                .d_dname = simple_dname
        };
        struct qstr this = QSTR_INIT(name, strlen(name));
        struct path path;
        struct file *file;

        path.dentry = d_alloc_pseudo(mnt->mnt_sb, &this);
        if (!path.dentry)
                return ERR_PTR(-ENOMEM);
        if (!mnt->mnt_sb->s_d_op)
                d_set_d_op(path.dentry, &anon_ops);
        path.mnt = mntget(mnt);
        d_instantiate(path.dentry, inode);
        file = alloc_file(&path, flags, fops);
        if (IS_ERR(file)) {
                ihold(inode);
                path_put(&path);
        }
        return file;
}
EXPORT_SYMBOL(alloc_file_pseudo);
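
/*
 * Illustrative sketch (not part of this file): alloc_file_pseudo() is the
 * usual way pseudo filesystems hand out files that never appear in a
 * directory. A hypothetical caller that already holds a reference to an
 * inode on such a mount might do ("example_mnt" and "example_fops" are made
 * up, error handling trimmed):
 *
 *        struct file *file;
 *
 *        file = alloc_file_pseudo(inode, example_mnt, "[example]",
 *                                 O_RDWR, &example_fops);
 *        if (IS_ERR(file))
 *                iput(inode);        // on failure the caller still owns the inode
 *        return file;
 */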

struct file *alloc_file_clone(struct file *base, int flags,
                              const struct file_operations *fops)
{
        struct file *f = alloc_file(&base->f_path, flags, fops);
        if (!IS_ERR(f)) {
                path_get(&f->f_path);
                f->f_mapping = base->f_mapping;
        }
        return f;
}

/* the real guts of fput() - releasing the last reference to file
 */
static void __fput(struct file *file)
{
        struct dentry *dentry = file->f_path.dentry;
        struct vfsmount *mnt = file->f_path.mnt;
        struct inode *inode = file->f_inode;
        fmode_t mode = file->f_mode;

        if (unlikely(!(file->f_mode & FMODE_OPENED)))
                goto out;

        might_sleep();

        fsnotify_close(file);
        /*
         * The function eventpoll_release() should be the first called
         * in the file cleanup chain.
         */
        eventpoll_release(file);
        locks_remove_file(file);

        ima_file_free(file);
        if (unlikely(file->f_flags & FASYNC)) {
                if (file->f_op->fasync)
                        file->f_op->fasync(-1, file, 0);
        }
        if (file->f_op->release)
                file->f_op->release(inode, file);
        if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
                     !(mode & FMODE_PATH))) {
                cdev_put(inode->i_cdev);
        }
        fops_put(file->f_op);
        put_pid(file->f_owner.pid);
        put_file_access(file);
        dput(dentry);
        if (unlikely(mode & FMODE_NEED_UNMOUNT))
                dissolve_on_fput(mnt);
        mntput(mnt);
out:
        file_free(file);
}

static LLIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
        struct llist_node *node = llist_del_all(&delayed_fput_list);
        struct file *f, *t;

        llist_for_each_entry_safe(f, t, node, f_llist)
                __fput(f);
}

static void ____fput(struct callback_head *work)
{
        __fput(container_of(work, struct file, f_task_work));
}

/*
 * If a kernel thread really needs to have the final fput() it has done
 * to complete, call this. The only user right now is the boot - we
 * *do* need to make sure our writes to binaries on initramfs have
 * not left us with an opened struct file waiting for __fput() - execve()
 * won't work without that. Please, don't add more callers without
 * very good reasons; in particular, never call that with locks
 * held and never call that from a thread that might need to do
 * some work on any kind of umount.
 */
void flush_delayed_fput(void)
{
        delayed_fput(NULL);
}
EXPORT_SYMBOL_GPL(flush_delayed_fput);

static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);

void fput(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                struct task_struct *task = current;

                if (unlikely(!(file->f_mode & (FMODE_BACKING | FMODE_OPENED)))) {
                        file_free(file);
                        return;
                }
                if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
                        init_task_work(&file->f_task_work, ____fput);
                        if (!task_work_add(task, &file->f_task_work, TWA_RESUME))
                                return;
                        /*
                         * After this task has run exit_task_work(),
                         * task_work_add() will fail. Fall through to delayed
                         * fput to avoid leaking *file.
                         */
                }

                if (llist_add(&file->f_llist, &delayed_fput_list))
                        schedule_delayed_work(&delayed_fput_work, 1);
        }
}
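
/*
 * Illustrative sketch (not part of this file): the common caller-side pairing
 * that ends up here. fget() takes a reference on the file behind a descriptor;
 * fput() drops it, and only the final drop goes through __fput(), deferred as
 * described above (error handling trimmed):
 *
 *        struct file *file = fget(fd);
 *
 *        if (!file)
 *                return -EBADF;
 *        ... use file->f_op, file->private_data, etc. ...
 *        fput(file);        // last reference -> task_work or delayed workqueue
 */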

/*
 * Synchronous analog of fput(); for kernel threads that might be needed
 * in some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks), that need to wait for completion of __fput() and
 * know that for this specific struct file it won't involve anything that
 * would need them. Use only if you really need it - at the very least,
 * don't blindly convert fput() by kernel thread to that.
 */
void __fput_sync(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count))
                __fput(file);
}

EXPORT_SYMBOL(fput);
EXPORT_SYMBOL(__fput_sync);

void __init files_init(void)
{
        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
                                SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN |
                                SLAB_PANIC | SLAB_ACCOUNT, NULL);
        percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}

/*
 * One file with associated inode and dcache is very roughly 1K. By default
 * do not use more than 10% of our memory for files.
 */
void __init files_maxfiles_init(void)
{
        unsigned long n;
        unsigned long nr_pages = totalram_pages();
        unsigned long memreserve = (nr_pages - nr_free_pages()) * 3/2;

        memreserve = min(memreserve, nr_pages - 1);
        n = ((nr_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;

        files_stat.max_files = max_t(unsigned long, n, NR_FILE);
}
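
/*
 * Illustrative worked example (not part of this file), assuming 4 KiB pages
 * and 4 GiB of RAM, i.e. roughly 1M pages: if about 128K pages are already in
 * use at boot, memreserve is ~192K pages, leaving ~832K pages. With
 * PAGE_SIZE / 1024 == 4 that gives n = (832K * 4) / 10, roughly 340 000, so
 * file-max defaults to about 340 000 (or NR_FILE if that is larger).
 */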