/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/cdev.h>

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

EXPORT_SYMBOL(files_stat); /* Needed by unix.o */

/* public. Not pretty! */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);

static DEFINE_SPINLOCK(filp_count_lock);

/* slab constructors and destructors are called from arbitrary
 * context and must be fully threaded - use a local spinlock
 * to protect files_stat.nr_files
 */
void filp_ctor(void * objp, struct kmem_cache_s *cachep, unsigned long cflags)
{
	if ((cflags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		unsigned long flags;
		spin_lock_irqsave(&filp_count_lock, flags);
		files_stat.nr_files++;
		spin_unlock_irqrestore(&filp_count_lock, flags);
	}
}

void filp_dtor(void * objp, struct kmem_cache_s *cachep, unsigned long dflags)
{
	unsigned long flags;
	spin_lock_irqsave(&filp_count_lock, flags);
	files_stat.nr_files--;
	spin_unlock_irqrestore(&filp_count_lock, flags);
}

static inline void file_free(struct file *f)
{
	kmem_cache_free(filp_cachep, f);
}

/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 */
struct file *get_empty_filp(void)
{
	static int old_max;
	struct file * f;

	/*
	 * Privileged users can go above max_files
	 */
	if (files_stat.nr_files >= files_stat.max_files &&
	    !capable(CAP_SYS_ADMIN))
		goto over;

	f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
	if (f == NULL)
		goto fail;

	memset(f, 0, sizeof(*f));
	if (security_file_alloc(f))
		goto fail_sec;

	eventpoll_init_file(f);
	atomic_set(&f->f_count, 1);
	f->f_uid = current->fsuid;
	f->f_gid = current->fsgid;
	rwlock_init(&f->f_owner.lock);
	/* f->f_version: 0 */
	INIT_LIST_HEAD(&f->f_list);
	f->f_maxcount = INT_MAX;
	return f;

over:
	/* Ran out of filps - report that */
	if (files_stat.nr_files > old_max) {
		printk(KERN_INFO "VFS: file-max limit %d reached\n",
					files_stat.max_files);
		old_max = files_stat.nr_files;
	}
	goto fail;

fail_sec:
	file_free(f);
fail:
	return NULL;
}

EXPORT_SYMBOL(get_empty_filp);

void fastcall fput(struct file *file)
{
	if (atomic_dec_and_test(&file->f_count))
		__fput(file);
}

EXPORT_SYMBOL(fput);
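
/*
 * Usage sketch (an illustrative addition, not part of the original
 * file): fput() pairs with fget() below.  The fd value and the work
 * done while holding the reference are hypothetical; the point is the
 * get/put pattern, where the final fput() ends up in __fput():
 *
 *	struct file *file = fget(fd);
 *	if (!file)
 *		return -EBADF;
 *	... read or write through file ...
 *	fput(file);
 */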

/* __fput is called from task context when aio completion releases the
 * last use of a struct file *.  Do not use otherwise.
 */
void fastcall __fput(struct file *file)
{
	struct dentry *dentry = file->f_dentry;
	struct vfsmount *mnt = file->f_vfsmnt;
	struct inode *inode = dentry->d_inode;

	might_sleep();
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_flock(file);

	if (file->f_op && file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	if (unlikely(inode->i_cdev != NULL))
		cdev_put(inode->i_cdev);
	fops_put(file->f_op);
	if (file->f_mode & FMODE_WRITE)
		put_write_access(inode);
	file_kill(file);
	file->f_dentry = NULL;
	file->f_vfsmnt = NULL;
	file_free(file);
	dput(dentry);
	mntput(mnt);
}

struct file fastcall *fget(unsigned int fd)
{
	struct file *file;
	struct files_struct *files = current->files;

	spin_lock(&files->file_lock);
	file = fcheck_files(files, fd);
	if (file)
		get_file(file);
	spin_unlock(&files->file_lock);
	return file;
}

EXPORT_SYMBOL(fget);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 * You can use this only if it is guaranteed that the current task already
 * holds a refcnt to that file.  The check has to be done at fget() only,
 * and a flag is returned to be passed to the corresponding fput_light().
 * There must be no cloning of the fd table between an
 * fget_light/fput_light pair.
 */
struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
{
	struct file *file;
	struct files_struct *files = current->files;

	*fput_needed = 0;
	if (likely((atomic_read(&files->count) == 1))) {
		file = fcheck_files(files, fd);
	} else {
		spin_lock(&files->file_lock);
		file = fcheck_files(files, fd);
		if (file) {
			get_file(file);
			*fput_needed = 1;
		}
		spin_unlock(&files->file_lock);
	}
	return file;
}

void put_filp(struct file *file)
{
	if (atomic_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_kill(file);
		file_free(file);
	}
}

void file_move(struct file *file, struct list_head *list)
{
	if (!list)
		return;
	file_list_lock();
	list_move(&file->f_list, list);
	file_list_unlock();
}

void file_kill(struct file *file)
{
	if (!list_empty(&file->f_list)) {
		file_list_lock();
		list_del_init(&file->f_list);
		file_list_unlock();
	}
}

int fs_may_remount_ro(struct super_block *sb)
{
	struct list_head *p;

	/* Check that no files are currently opened for writing. */
	file_list_lock();
	list_for_each(p, &sb->s_files) {
		struct file *file = list_entry(p, struct file, f_list);
		struct inode *inode = file->f_dentry->d_inode;

		/* File with pending delete? */
		if (inode->i_nlink == 0)
			goto too_bad;

		/* Writeable file? */
		if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
			goto too_bad;
	}
	file_list_unlock();
	return 1; /* Tis' cool bro. */
too_bad:
	file_list_unlock();
	return 0;
}

void __init files_init(unsigned long mempages)
{
	int n;
	/* One file with associated inode and dcache is very roughly 1K.
	 * By default don't use more than 10% of our memory for files.
	 */

	n = (mempages * (PAGE_SIZE / 1024)) / 10;
	files_stat.max_files = n;
	if (files_stat.max_files < NR_FILE)
		files_stat.max_files = NR_FILE;
}
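
/*
 * Worked example for the files_init() sizing above (an illustrative
 * addition, not part of the original file).  Assuming PAGE_SIZE is
 * 4096 and mempages is 262144 (i.e. 1 GiB of RAM):
 *
 *	n = (262144 * (4096 / 1024)) / 10 = 104857
 *
 * so max_files allows roughly one open file per 10 KiB of memory, and
 * the NR_FILE check keeps the limit from dropping below the static
 * default on small machines.
 */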