/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode_sb_list_lock protects:
 *   sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode_sb_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode_sb_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations empty_fops;
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &empty_fops;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
#ifdef CONFIG_QUOTA
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	mutex_init(&inode->i_mutex);
	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->private_data = NULL;
	mapping->backing_dev_info = &default_backing_dev_info;
	mapping->writeback_index = 0;

	/*
	 * If the block_device provides a backing_dev_info for client
	 * inodes then use that. Otherwise the inode shares the bdev's
	 * backing_dev_info.
	 */
	if (sb->s_bdev) {
		struct backing_dev_info *bdi;

		bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
		mapping->backing_dev_info = bdi;
	}
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif

	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);
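
/*
 * Editor's note: an illustrative sketch (not part of the original file) of
 * the ->alloc_inode/->destroy_inode pair that alloc_inode() and
 * destroy_inode() dispatch to. "foofs" and everything prefixed with it are
 * hypothetical; real-world examples are ext2_alloc_inode() and friends.
 */
struct foofs_inode_info {
	unsigned long	i_foo_state;	/* fs-private per-inode data */
	struct inode	vfs_inode;	/* the embedded VFS inode */
};

static struct kmem_cache *foofs_inode_cachep;	/* hypothetical slab cache */

static struct inode *foofs_alloc_inode(struct super_block *sb)
{
	struct foofs_inode_info *fi;

	fi = kmem_cache_alloc(foofs_inode_cachep, GFP_KERNEL);
	if (!fi)
		return NULL;
	/* the core then runs inode_init_always() on the returned inode */
	return &fi->vfs_inode;
}

static void foofs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(foofs_inode_cachep,
			container_of(inode, struct foofs_inode_info, vfs_inode));
}

static void foofs_destroy_inode(struct inode *inode)
{
	/* free via RCU so lockless path walks never touch freed memory */
	call_rcu(&inode->i_rcu, foofs_i_callback);
}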
static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);
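
/*
 * Editor's note: an illustrative sketch (not part of the original file) of
 * how the nlink helpers above are used instead of poking __i_nlink
 * directly; foofs_unlink() and foofs_delete_entry() are hypothetical,
 * modelled on a classic disk filesystem unlink path.
 */
static int foofs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int err;

	err = foofs_delete_entry(dir, dentry);	/* hypothetical on-disk removal */
	if (err)
		return err;

	inode->i_ctime = dir->i_ctime;
	drop_nlink(inode);	/* may take i_nlink to zero -> s_remove_count */
	mark_inode_dirty(inode);
	return 0;
}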
void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	spin_lock_init(&mapping->tree_lock);
	mutex_init(&mapping->i_mmap_mutex);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	mapping->i_mmap = RB_ROOT;
	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_inc(nr_unused);
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
	if (!(inode->i_state & (I_DIRTY | I_SYNC | I_FREEING | I_WILL_FREE)) &&
	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
		inode_lru_list_add(inode);
}

static void inode_lru_list_del(struct inode *inode)
{
	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_dec(nr_unused);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode_sb_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode_sb_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *	inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);
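
/*
 * Editor's note: illustrative sketch (not part of the original file). A
 * freshly created disk inode is typically hashed by its inode number via
 * the insert_inode_hash() wrapper from <linux/fs.h>, which calls
 * __insert_inode_hash(inode, inode->i_ino); foofs_new_inode() is
 * hypothetical.
 */
static struct inode *foofs_new_inode(struct super_block *sb,
				     unsigned long ino, umode_t mode)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	inode->i_ino = ino;
	inode->i_mode = mode;
	insert_inode_hash(inode);
	return inode;
}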
/**
 * __remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void clear_inode(struct inode *inode)
{
	might_sleep();
	/*
	 * We have to cycle tree_lock here because reclaim can still be in
	 * the process of removing the last page (in
	 * __delete_from_page_cache()) and we must not free the mapping
	 * under it.
	 */
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	BUG_ON(inode->i_data.nrshadows);
	spin_unlock_irq(&inode->i_data.tree_lock);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_wb_list))
		inode_wb_list_del(inode);

	inode_sb_list_del(inode);

	/*
	 * Wait for flusher thread to be done with the inode so that filesystem
	 * does not start destroying it while writeback is still running. Since
	 * the inode has I_FREEING set, flusher thread won't start new work on
	 * the inode.  We just have to wait for running writeback to finish.
	 */
	inode_wait_for_writeback(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}
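
/*
 * Editor's note: illustrative sketch (not part of the original file) of a
 * minimal ->evict_inode implementation matching the contract evict()
 * relies on above: drop the pagecache, do fs-specific teardown, then
 * clear_inode(). "foofs" is hypothetical.
 */
static void foofs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	if (!inode->i_nlink) {
		/* last link gone: release on-disk blocks here (fs-specific) */
	}
	clear_inode(inode);	/* must be called before returning */
}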
/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
	}
}

/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb:		superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained.  This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);
}

/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb:		superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock.  If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);

	return busy;
}
/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static enum lru_status
inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct inode	*inode = container_of(item, struct inode, i_lru);

	/*
	 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	/*
	 * Referenced or dirty inodes are still in use. Give them another pass
	 * through the LRU as we cannot reclaim them now.
	 */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED)) {
		list_del_init(&inode->i_lru);
		spin_unlock(&inode->i_lock);
		this_cpu_dec(nr_unused);
		return LRU_REMOVED;
	}

	/* recently referenced inodes get one more pass */
	if (inode->i_state & I_REFERENCED) {
		inode->i_state &= ~I_REFERENCED;
		spin_unlock(&inode->i_lock);
		return LRU_ROTATE;
	}

	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(lru_lock);
		if (remove_inode_buffers(inode)) {
			unsigned long reap;
			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
			if (current_is_kswapd())
				__count_vm_events(KSWAPD_INODESTEAL, reap);
			else
				__count_vm_events(PGINODESTEAL, reap);
			if (current->reclaim_state)
				current->reclaim_state->reclaimed_slab += reap;
		}
		iput(inode);
		spin_lock(lru_lock);
		return LRU_RETRY;
	}

	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	list_move(&inode->i_lru, freeable);
	spin_unlock(&inode->i_lock);

	this_cpu_dec(nr_unused);
	return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan,
		     int nid)
{
	LIST_HEAD(freeable);
	long freed;

	freed = list_lru_walk_node(&sb->s_inode_lru, nid, inode_lru_isolate,
				   &freeable, &nr_to_scan);
	dispose_list(&freeable);
	return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}
/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	*p = ++res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);

/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock.
 * The inode won't be chained into the superblock's s_inodes list.
 * This means:
 * - the fs can't be unmounted
 * - quotas, fsnotify and writeback can't work on it
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}
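
/*
 * Editor's note: illustrative sketch (not part of the original file) of the
 * usual pseudo-filesystem pattern built on new_inode_pseudo() and
 * get_next_ino(), as seen in sockfs/anon_inodes-style code; the helper
 * name is hypothetical.
 */
static struct inode *foofs_make_anon_inode(struct super_block *sb)
{
	struct inode *inode = new_inode_pseudo(sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	inode->i_ino = get_next_ino();	/* cheap, per-cpu, not hashed */
	inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	return inode;
}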
/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&inode_sb_list_lock);

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			mutex_destroy(&inode->i_mutex);
			mutex_init(&inode->i_mutex);
			lockdep_set_class(&inode->i_mutex,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode:	new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument that is not a directory.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 > inode2)
		swap(inode1, inode2);

	if (inode1 && !S_ISDIR(inode1->i_mode))
		mutex_lock(&inode1->i_mutex);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 && !S_ISDIR(inode1->i_mode))
		mutex_unlock(&inode1->i_mutex);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		mutex_unlock(&inode2->i_mutex);
}
EXPORT_SYMBOL(unlock_two_nondirectories);
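
/*
 * Editor's note: illustrative sketch (not part of the original file) of a
 * caller of the two-inode locking helpers above, e.g. an operation that
 * works on a source and a target file at once; foofs_swap_extents() is
 * hypothetical. The helpers order by address, so callers need not.
 */
static int foofs_swap_extents(struct inode *src, struct inode *dst)
{
	int err = 0;

	lock_two_nondirectories(src, dst);
	/* ... operate on both inodes with both i_mutexes held ... */
	unlock_two_nondirectories(src, dst);
	return err;
}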
/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
EXPORT_SYMBOL(iget5_locked);
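
/*
 * Editor's note: illustrative sketch (not part of the original file) of the
 * canonical iget5_locked() caller; the foofs names are hypothetical. @test
 * and @set run under inode_hash_lock, so they only compare and store the
 * key; the slow on-disk read happens on the I_NEW path afterwards.
 */
static int foofs_inode_test(struct inode *inode, void *data)
{
	return inode->i_private == data;
}

static int foofs_inode_set(struct inode *inode, void *data)
{
	inode->i_private = data;
	return 0;
}

static struct inode *foofs_iget(struct super_block *sb, void *key)
{
	struct inode *inode;

	inode = iget5_locked(sb, (unsigned long)key,
			     foofs_inode_test, foofs_inode_set, key);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		/* ... read the inode in from backing store here ... */
		unlock_new_inode(inode);
	}
	return inode;
}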
/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);

/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb) {
			spin_unlock(&inode_hash_lock);
			return 0;
		}
	}
	spin_unlock(&inode_hash_lock);

	return 1;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);
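
/*
 * Editor's note: illustrative sketch (not part of the original file).
 * Filesystems without stable inode numbers combine iunique() above with
 * new_inode(), reserving a small range for fixed objects such as the
 * root; the foofs names are hypothetical.
 */
#define FOOFS_MAX_RESERVED_INO	16	/* hypothetical reserved range */

static struct inode *foofs_new_synthetic_inode(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode)
		inode->i_ino = iunique(sb, FOOFS_MAX_RESERVED_INO);
	return inode;
}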
/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode.  You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count.  Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup);
int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct inode *old = NULL;
		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	while (1) {
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_sb != sb)
				continue;
			if (!test(old, data))
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked4);


int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);
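
/*
 * Editor's note: illustrative sketch (not part of the original file). A
 * filesystem that never wants unreferenced inodes cached simply points
 * ->drop_inode at generic_delete_inode(), the way ramfs-like filesystems
 * do; the foofs structure is hypothetical.
 */
static const struct super_operations foofs_super_ops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,	/* evict on last iput() */
};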
/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour.  If it tells
 * us to evict inode, do so.  Otherwise, retain inode
 * in cache if fs is alive, sync and evict if fs is
 * shutting down.
 */
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	if (!drop && (sb->s_flags & MS_ACTIVE)) {
		inode->i_state |= I_REFERENCED;
		inode_add_lru(inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	if (!drop) {
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode->i_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode->i_lock);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state &= ~I_WILL_FREE;
	}

	inode->i_state |= I_FREEING;
	if (!list_empty(&inode->i_lru))
		inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (inode) {
		BUG_ON(inode->i_state & I_CLEAR);

		if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
			iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);

/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1 the function will return the
 * disk block relative to the disk start that holds that block of the
 * file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;
	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);
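
/*
 * Editor's note: illustrative sketch (not part of the original file). This
 * is the kind of query the FIBMAP ioctl and the swap code make via bmap();
 * a result of zero means "unmapped, or no ->bmap". The helper is
 * hypothetical.
 */
static bool foofs_block_is_hole(struct inode *inode, sector_t block)
{
	return bmap(inode, block) == 0;
}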
/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
			     struct timespec now)
{

	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}

/*
 * This does the actual work of updating an inode's time or version.  The
 * caller must have called mnt_want_write() before calling this.
 */
static int update_time(struct inode *inode, struct timespec *time, int flags)
{
	if (inode->i_op->update_time)
		return inode->i_op->update_time(inode, time, flags);

	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_VERSION)
		inode_inc_iversion(inode);
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;
	mark_inode_dirty_sync(inode);
	return 0;
}

/**
 * touch_atime - update the access time
 * @path: the &struct path to update
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
void touch_atime(const struct path *path)
{
	struct vfsmount *mnt = path->mnt;
	struct inode *inode = path->dentry->d_inode;
	struct timespec now;

	if (inode->i_flags & S_NOATIME)
		return;
	if (IS_NOATIME(inode))
		return;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	if (mnt->mnt_flags & MNT_NOATIME)
		return;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	now = current_fs_time(inode->i_sb);

	if (!relatime_need_update(mnt, inode, now))
		return;

	if (timespec_equal(&inode->i_atime, &now))
		return;

	if (!sb_start_write_trylock(inode->i_sb))
		return;

	if (__mnt_want_write(mnt))
		goto skip_update;
	/*
	 * File systems can error out when updating inodes if they need to
	 * allocate new space to modify an inode (such is the case for
	 * Btrfs), but since we touch atime while walking down the path we
	 * really don't care if we failed to update the atime of the file,
	 * so just ignore the return value.
	 * We may also fail on filesystems that have the ability to make parts
	 * of the fs read only, e.g. subvolumes in Btrfs.
	 */
	update_time(inode, &now, S_ATIME);
	__mnt_drop_write(mnt);
skip_update:
	sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);
/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
	umode_t mode = dentry->d_inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
		return kill;

	return 0;
}
EXPORT_SYMBOL(should_remove_suid);

static int __remove_suid(struct dentry *dentry, int kill)
{
	struct iattr newattrs;

	newattrs.ia_valid = ATTR_FORCE | kill;
	/*
	 * Note we call this on write, so notify_change will not
	 * encounter any conflicting delegations:
	 */
	return notify_change(dentry, &newattrs, NULL);
}

int file_remove_suid(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int killsuid;
	int killpriv;
	int error = 0;

	/* Fast path for nothing security related */
	if (IS_NOSEC(inode))
		return 0;

	killsuid = should_remove_suid(dentry);
	killpriv = security_inode_need_killpriv(dentry);

	if (killpriv < 0)
		return killpriv;
	if (killpriv)
		error = security_inode_killpriv(dentry);
	if (!error && killsuid)
		error = __remove_suid(dentry, killsuid);
	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
		inode->i_flags |= S_NOSEC;

	return error;
}
EXPORT_SYMBOL(file_remove_suid);

/**
 * file_update_time - update mtime and ctime time
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode
 * for writeback.  Note that this function is meant exclusively for
 * usage in the file write path of filesystems, and filesystems may
 * choose to explicitly ignore updates via this function with the
 * S_NOCMTIME inode flag, e.g. for network filesystems where these
 * timestamps are handled by the server.  This can return an error for
 * file systems that need to allocate space in order to update an inode.
 */
int file_update_time(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct timespec now;
	int sync_it = 0;
	int ret;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return 0;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return 0;

	/* Finally allowed to write? Takes lock. */
	if (__mnt_want_write_file(file))
		return 0;

	ret = update_time(inode, &now, sync_it);
	__mnt_drop_write_file(file);

	return ret;
}
EXPORT_SYMBOL(file_update_time);
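
/*
 * Editor's note: illustrative sketch (not part of the original file) of the
 * pairing used in generic write paths (compare __generic_file_aio_write()):
 * strip set[ug]id bits, then bump the timestamps; the helper is
 * hypothetical.
 */
static int foofs_write_checks(struct file *file)
{
	int err = file_remove_suid(file);

	if (err)
		return err;
	return file_update_time(file);	/* cheap no-op when nothing changed */
}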
int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

int inode_wait(void *word)
{
	schedule();
	return 0;
}
EXPORT_SYMBOL(inode_wait);

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * for rechecking inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	unsigned int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
	unsigned int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &pipefifo_fops;
	else if (S_ISSOCK(mode))
		inode->i_fop = &bad_sock_fops;
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);

/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
			umode_t mode)
{
	inode->i_uid = current_fsuid();
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
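
/*
 * Editor's note: illustrative sketch (not part of the original file).
 * Filesystem create paths call inode_init_owner() so the sgid-directory
 * rules above are applied in one place; foofs_create_inode() is
 * hypothetical.
 */
static struct inode *foofs_create_inode(struct super_block *sb,
					struct inode *dir, umode_t mode)
{
	struct inode *inode = new_inode(sb);

	if (inode)
		inode_init_owner(inode, dir, mode); /* uid, gid and sgid rules */
	return inode;
}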
/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in a namespace with the
 * inode owner uid mapped, or owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
	struct user_namespace *ns;

	if (uid_eq(current_fsuid(), inode->i_uid))
		return true;

	ns = current_user_ns();
	if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);

/*
 * Direct i/o helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wait);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);

/*
 * inode_dio_done - signal finish of a direct I/O request
 * @inode: inode the direct I/O happens on
 *
 * This is called once we've finished processing a direct I/O request,
 * and is used to wake up callers waiting for direct I/O to be quiesced.
 */
void inode_dio_done(struct inode *inode)
{
	if (atomic_dec_and_test(&inode->i_dio_count))
		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
EXPORT_SYMBOL(inode_dio_done);

/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated).  The reason for the cmpxchg() loop
 * --- which wouldn't be necessary if all code paths which modify
 * i_flags actually followed this rule, is that there is at least one
 * code path which doesn't today --- for example,
 * __generic_file_aio_write() calls file_remove_suid() without holding
 * i_mutex --- so we use cmpxchg() out of an abundance of caution.
 *
 * In the long run, i_mutex is overkill, and we should probably look
 * at using the i_lock spinlock to protect i_flags, and then make sure
 * it is so documented in include/linux/fs.h and that all code follows
 * the locking convention!!
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
		     unsigned int mask)
{
	unsigned int old_flags, new_flags;

	WARN_ON_ONCE(flags & ~mask);
	do {
		old_flags = ACCESS_ONCE(inode->i_flags);
		new_flags = (old_flags & ~mask) | flags;
	} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
				  new_flags) != old_flags));
}
EXPORT_SYMBOL(inode_set_flags);
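
/*
 * Editor's note: illustrative sketch (not part of the original file) of a
 * typical inode_set_flags() caller, updating only the bits it owns while
 * leaving the rest of i_flags alone (compare ext4_set_inode_flags());
 * "foofs" is hypothetical.
 */
static void foofs_set_inode_flags(struct inode *inode, bool sync, bool noatime)
{
	unsigned int new_fl = 0;

	if (sync)
		new_fl |= S_SYNC;
	if (noatime)
		new_fl |= S_NOATIME;
	inode_set_flags(inode, new_fl, S_SYNC | S_NOATIME);
}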