/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <trace/events/writeback.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode->i_sb->s_inode_list_lock protects:
 *   inode->i_sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
        int i;
        long sum = 0;
        for_each_possible_cpu(i)
                sum += per_cpu(nr_inodes, i);
        return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
        int i;
        long sum = 0;
        for_each_possible_cpu(i)
                sum += per_cpu(nr_unused, i);
        return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
        /* not actually dirty inodes, but a wild approximation */
        long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
        return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp, loff_t *ppos)
{
        inodes_stat.nr_inodes = get_nr_inodes();
        inodes_stat.nr_unused = get_nr_inodes_unused();
        return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

static int no_open(struct inode *inode, struct file *file)
{
        return -ENXIO;
}

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
        static const struct inode_operations empty_iops;
        static const struct file_operations no_open_fops = {.open = no_open};
        struct address_space *const mapping = &inode->i_data;

        inode->i_sb = sb;
        inode->i_blkbits = sb->s_blocksize_bits;
        inode->i_flags = 0;
        atomic_set(&inode->i_count, 1);
        inode->i_op = &empty_iops;
        inode->i_fop = &no_open_fops;
        inode->__i_nlink = 1;
        inode->i_opflags = 0;
        if (sb->s_xattr)
                inode->i_opflags |= IOP_XATTR;
        i_uid_write(inode, 0);
        i_gid_write(inode, 0);
        atomic_set(&inode->i_writecount, 0);
        inode->i_size = 0;
        inode->i_write_hint = WRITE_LIFE_NOT_SET;
        inode->i_blocks = 0;
        inode->i_bytes = 0;
        inode->i_generation = 0;
        inode->i_pipe = NULL;
        inode->i_bdev = NULL;
        inode->i_cdev = NULL;
        inode->i_link = NULL;
        inode->i_dir_seq = 0;
        inode->i_rdev = 0;
        inode->dirtied_when = 0;

#ifdef CONFIG_CGROUP_WRITEBACK
        inode->i_wb_frn_winner = 0;
        inode->i_wb_frn_avg_time = 0;
        inode->i_wb_frn_history = 0;
#endif

        if (security_inode_alloc(inode))
                goto out;
        spin_lock_init(&inode->i_lock);
        lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

        init_rwsem(&inode->i_rwsem);
        lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);

        atomic_set(&inode->i_dio_count, 0);

        mapping->a_ops = &empty_aops;
        mapping->host = inode;
        mapping->flags = 0;
        atomic_set(&mapping->i_mmap_writable, 0);
        mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
        mapping->private_data = NULL;
        mapping->writeback_index = 0;
        inode->i_private = NULL;
        inode->i_mapping = mapping;
        INIT_HLIST_HEAD(&inode->i_dentry);      /* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
        inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
        inode->i_fsnotify_mask = 0;
#endif
        inode->i_flctx = NULL;
        this_cpu_inc(nr_inodes);

        return 0;
out:
        return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);
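
/*
 * Usage sketch (illustrative, not part of this file): a filesystem's
 * ->alloc_inode() method usually allocates a container object from a
 * private slab and returns the embedded VFS inode; alloc_inode() below
 * then runs inode_init_always() on it.  "myfs_inode" and
 * "myfs_inode_cachep" are hypothetical names.
 *
 *      struct myfs_inode {
 *              unsigned long private_state;
 *              struct inode vfs_inode;         // must be embedded
 *      };
 *
 *      static struct inode *myfs_alloc_inode(struct super_block *sb)
 *      {
 *              struct myfs_inode *mi;
 *
 *              mi = kmem_cache_alloc(myfs_inode_cachep, GFP_KERNEL);
 *              if (!mi)
 *                      return NULL;
 *              return &mi->vfs_inode;
 *      }
 */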
static struct inode *alloc_inode(struct super_block *sb)
{
        struct inode *inode;

        if (sb->s_op->alloc_inode)
                inode = sb->s_op->alloc_inode(sb);
        else
                inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

        if (!inode)
                return NULL;

        if (unlikely(inode_init_always(sb, inode))) {
                if (inode->i_sb->s_op->destroy_inode)
                        inode->i_sb->s_op->destroy_inode(inode);
                else
                        kmem_cache_free(inode_cachep, inode);
                return NULL;
        }

        return inode;
}

void free_inode_nonrcu(struct inode *inode)
{
        kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
        BUG_ON(inode_has_buffers(inode));
        inode_detach_wb(inode);
        security_inode_free(inode);
        fsnotify_inode_delete(inode);
        locks_free_lock_context(inode);
        if (!inode->i_nlink) {
                WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
                atomic_long_dec(&inode->i_sb->s_remove_count);
        }

#ifdef CONFIG_FS_POSIX_ACL
        if (inode->i_acl && !is_uncached_acl(inode->i_acl))
                posix_acl_release(inode->i_acl);
        if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
                posix_acl_release(inode->i_default_acl);
#endif
        this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);
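
/*
 * Usage sketch (illustrative): a filesystem that embeds the inode in its
 * own object frees it through the same RCU pattern as the default path
 * below (see i_callback()).  The VFS calls __destroy_inode() before
 * invoking ->destroy_inode(), so only the memory needs freeing here.
 * All "myfs" names, including the MYFS_I() container accessor, are
 * hypothetical.
 *
 *      static void myfs_i_callback(struct rcu_head *head)
 *      {
 *              struct inode *inode = container_of(head, struct inode, i_rcu);
 *
 *              kmem_cache_free(myfs_inode_cachep, MYFS_I(inode));
 *      }
 *
 *      static void myfs_destroy_inode(struct inode *inode)
 *      {
 *              call_rcu(&inode->i_rcu, myfs_i_callback);
 *      }
 */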
static void i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
        BUG_ON(!list_empty(&inode->i_lru));
        __destroy_inode(inode);
        if (inode->i_sb->s_op->destroy_inode)
                inode->i_sb->s_op->destroy_inode(inode);
        else
                call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
        WARN_ON(inode->i_nlink == 0);
        inode->__i_nlink--;
        if (!inode->i_nlink)
                atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
        if (inode->i_nlink) {
                inode->__i_nlink = 0;
                atomic_long_inc(&inode->i_sb->s_remove_count);
        }
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
        if (!nlink) {
                clear_nlink(inode);
        } else {
                /* Yes, some filesystems do change nlink from zero to one */
                if (inode->i_nlink == 0)
                        atomic_long_dec(&inode->i_sb->s_remove_count);

                inode->__i_nlink = nlink;
        }
}
EXPORT_SYMBOL(set_nlink);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
        if (unlikely(inode->i_nlink == 0)) {
                WARN_ON(!(inode->i_state & I_LINKABLE));
                atomic_long_dec(&inode->i_sb->s_remove_count);
        }

        inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);

void address_space_init_once(struct address_space *mapping)
{
        memset(mapping, 0, sizeof(*mapping));
        INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC | __GFP_ACCOUNT);
        spin_lock_init(&mapping->tree_lock);
        init_rwsem(&mapping->i_mmap_rwsem);
        INIT_LIST_HEAD(&mapping->private_list);
        spin_lock_init(&mapping->private_lock);
        mapping->i_mmap = RB_ROOT;
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab be aware of that.
 */
void inode_init_once(struct inode *inode)
{
        memset(inode, 0, sizeof(*inode));
        INIT_HLIST_NODE(&inode->i_hash);
        INIT_LIST_HEAD(&inode->i_devices);
        INIT_LIST_HEAD(&inode->i_io_list);
        INIT_LIST_HEAD(&inode->i_wb_list);
        INIT_LIST_HEAD(&inode->i_lru);
        address_space_init_once(&inode->i_data);
        i_size_ordered_init(inode);
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
        struct inode *inode = (struct inode *) foo;

        inode_init_once(inode);
}
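
/*
 * Usage sketch (illustrative): a filesystem with its own inode slab gives
 * the cache a constructor ending in inode_init_once(), so the idempotent
 * fields are set exactly once per slab object, mirroring init_once()
 * above.  "myfs" names are hypothetical.
 *
 *      static void myfs_init_once(void *foo)
 *      {
 *              struct myfs_inode *mi = foo;
 *
 *              inode_init_once(&mi->vfs_inode);
 *      }
 *
 *      myfs_inode_cachep = kmem_cache_create("myfs_inode_cache",
 *                              sizeof(struct myfs_inode), 0,
 *                              SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD |
 *                              SLAB_ACCOUNT,
 *                              myfs_init_once);
 */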
/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
        atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
        WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
        if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
                this_cpu_inc(nr_unused);
        else
                inode->i_state |= I_REFERENCED;
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
        if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
                                I_FREEING | I_WILL_FREE)) &&
            !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
                inode_lru_list_add(inode);
}

static void inode_lru_list_del(struct inode *inode)
{
        if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
                this_cpu_dec(nr_unused);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
        spin_lock(&inode->i_sb->s_inode_list_lock);
        list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
        spin_unlock(&inode->i_sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
        if (!list_empty(&inode->i_sb_list)) {
                spin_lock(&inode->i_sb->s_inode_list_lock);
                list_del_init(&inode->i_sb_list);
                spin_unlock(&inode->i_sb->s_inode_list_lock);
        }
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
        unsigned long tmp;

        tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
                        L1_CACHE_BYTES;
        tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
        return tmp & i_hash_mask;
}

/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *              inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
        struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

        spin_lock(&inode_hash_lock);
        spin_lock(&inode->i_lock);
        hlist_add_head(&inode->i_hash, b);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);
/**
 * __remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
        spin_lock(&inode_hash_lock);
        spin_lock(&inode->i_lock);
        hlist_del_init(&inode->i_hash);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void clear_inode(struct inode *inode)
{
        might_sleep();
        /*
         * We have to cycle tree_lock here because reclaim can be still in the
         * process of removing the last page (in __delete_from_page_cache())
         * and we must not free the mapping under it.
         */
        spin_lock_irq(&inode->i_data.tree_lock);
        BUG_ON(inode->i_data.nrpages);
        BUG_ON(inode->i_data.nrexceptional);
        spin_unlock_irq(&inode->i_data.tree_lock);
        BUG_ON(!list_empty(&inode->i_data.private_list));
        BUG_ON(!(inode->i_state & I_FREEING));
        BUG_ON(inode->i_state & I_CLEAR);
        BUG_ON(!list_empty(&inode->i_wb_list));
        /* don't need i_lock here, no concurrent mods to i_state */
        inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);
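
/*
 * Usage sketch (illustrative): a filesystem's ->evict_inode() must end up
 * doing what the default path in evict() below does - dump the page cache
 * and call clear_inode() - with any on-disk deallocation in between.
 * "myfs_free_on_disk" is a hypothetical routine.
 *
 *      static void myfs_evict_inode(struct inode *inode)
 *      {
 *              truncate_inode_pages_final(&inode->i_data);
 *              if (!inode->i_nlink && !is_bad_inode(inode))
 *                      myfs_free_on_disk(inode);       // deallocate blocks
 *              clear_inode(inode);
 *      }
 */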
/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
        const struct super_operations *op = inode->i_sb->s_op;

        BUG_ON(!(inode->i_state & I_FREEING));
        BUG_ON(!list_empty(&inode->i_lru));

        if (!list_empty(&inode->i_io_list))
                inode_io_list_del(inode);

        inode_sb_list_del(inode);

        /*
         * Wait for flusher thread to be done with the inode so that filesystem
         * does not start destroying it while writeback is still running. Since
         * the inode has I_FREEING set, flusher thread won't start new work on
         * the inode.  We just have to wait for running writeback to finish.
         */
        inode_wait_for_writeback(inode);

        if (op->evict_inode) {
                op->evict_inode(inode);
        } else {
                truncate_inode_pages_final(&inode->i_data);
                clear_inode(inode);
        }
        if (S_ISBLK(inode->i_mode) && inode->i_bdev)
                bd_forget(inode);
        if (S_ISCHR(inode->i_mode) && inode->i_cdev)
                cd_forget(inode);

        remove_inode_hash(inode);

        spin_lock(&inode->i_lock);
        wake_up_bit(&inode->i_state, __I_NEW);
        BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
        spin_unlock(&inode->i_lock);

        destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct inode *inode;

                inode = list_first_entry(head, struct inode, i_lru);
                list_del_init(&inode->i_lru);

                evict(inode);
                cond_resched();
        }
}

/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb: superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained.  This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
        struct inode *inode, *next;
        LIST_HEAD(dispose);

again:
        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                if (atomic_read(&inode->i_count))
                        continue;

                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }

                inode->i_state |= I_FREEING;
                inode_lru_list_del(inode);
                spin_unlock(&inode->i_lock);
                list_add(&inode->i_lru, &dispose);

                /*
                 * We can have a ton of inodes to evict at unmount time given
                 * enough memory, check to see if we need to go to sleep for a
                 * bit so we don't livelock.
                 */
                if (need_resched()) {
                        spin_unlock(&sb->s_inode_list_lock);
                        cond_resched();
                        dispose_list(&dispose);
                        goto again;
                }
        }
        spin_unlock(&sb->s_inode_list_lock);

        dispose_list(&dispose);
}
EXPORT_SYMBOL_GPL(evict_inodes);
/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb: superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock.  If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
        int busy = 0;
        struct inode *inode, *next;
        LIST_HEAD(dispose);

        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
                        spin_unlock(&inode->i_lock);
                        busy = 1;
                        continue;
                }
                if (atomic_read(&inode->i_count)) {
                        spin_unlock(&inode->i_lock);
                        busy = 1;
                        continue;
                }

                inode->i_state |= I_FREEING;
                inode_lru_list_del(inode);
                spin_unlock(&inode->i_lock);
                list_add(&inode->i_lru, &dispose);
        }
        spin_unlock(&sb->s_inode_list_lock);

        dispose_list(&dispose);

        return busy;
}

/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final().  When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed.  This is necessary because
 * we are doing lazy LRU updates to minimise lock contention, so the LRU does
 * not have strict ordering.  Hence we don't want to reclaim inodes with this
 * flag set because they are the inodes that are out of order.
 */
static enum lru_status inode_lru_isolate(struct list_head *item,
                struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
        struct list_head *freeable = arg;
        struct inode *inode = container_of(item, struct inode, i_lru);

        /*
         * we are inverting the lru lock/inode->i_lock here, so use a trylock.
         * If we fail to get the lock, just skip it.
         */
        if (!spin_trylock(&inode->i_lock))
                return LRU_SKIP;

        /*
         * Referenced or dirty inodes are still in use.  Give them another
         * pass through the LRU as we cannot reclaim them now.
         */
        if (atomic_read(&inode->i_count) ||
            (inode->i_state & ~I_REFERENCED)) {
                list_lru_isolate(lru, &inode->i_lru);
                spin_unlock(&inode->i_lock);
                this_cpu_dec(nr_unused);
                return LRU_REMOVED;
        }

        /* recently referenced inodes get one more pass */
        if (inode->i_state & I_REFERENCED) {
                inode->i_state &= ~I_REFERENCED;
                spin_unlock(&inode->i_lock);
                return LRU_ROTATE;
        }

        if (inode_has_buffers(inode) || inode->i_data.nrpages) {
                __iget(inode);
                spin_unlock(&inode->i_lock);
                spin_unlock(lru_lock);
                if (remove_inode_buffers(inode)) {
                        unsigned long reap;
                        reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
                        if (current_is_kswapd())
                                __count_vm_events(KSWAPD_INODESTEAL, reap);
                        else
                                __count_vm_events(PGINODESTEAL, reap);
                        if (current->reclaim_state)
                                current->reclaim_state->reclaimed_slab += reap;
                }
                iput(inode);
                spin_lock(lru_lock);
                return LRU_RETRY;
        }

        WARN_ON(inode->i_state & I_NEW);
        inode->i_state |= I_FREEING;
        list_lru_isolate_move(lru, &inode->i_lru, freeable);
        spin_unlock(&inode->i_lock);

        this_cpu_dec(nr_unused);
        return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
        LIST_HEAD(freeable);
        long freed;

        freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
                                     inode_lru_isolate, &freeable);
        dispose_list(&freeable);
        return freed;
}
static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode lock held.
 */
static struct inode *find_inode(struct super_block *sb,
                                struct hlist_head *head,
                                int (*test)(struct inode *, void *),
                                void *data)
{
        struct inode *inode = NULL;

repeat:
        hlist_for_each_entry(inode, head, i_hash) {
                if (inode->i_sb != sb)
                        continue;
                if (!test(inode, data))
                        continue;
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                return inode;
        }
        return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
                                struct hlist_head *head, unsigned long ino)
{
        struct inode *inode = NULL;

repeat:
        hlist_for_each_entry(inode, head, i_hash) {
                if (inode->i_ino != ino)
                        continue;
                if (inode->i_sb != sb)
                        continue;
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                return inode;
        }
        return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
        unsigned int *p = &get_cpu_var(last_ino);
        unsigned int res = *p;

#ifdef CONFIG_SMP
        if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
                static atomic_t shared_last_ino;
                int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

                res = next - LAST_INO_BATCH;
        }
#endif

        res++;
        /* get_next_ino should not provide a 0 inode number */
        if (unlikely(!res))
                res++;
        *p = res;
        put_cpu_var(last_ino);
        return res;
}
EXPORT_SYMBOL(get_next_ino);
/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock.
 * The inode won't be chained onto the superblock's s_inodes list;
 * this means:
 * - the fs can't be unmounted
 * - quotas, fsnotify, writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
        struct inode *inode = alloc_inode(sb);

        if (inode) {
                spin_lock(&inode->i_lock);
                inode->i_state = 0;
                spin_unlock(&inode->i_lock);
                INIT_LIST_HEAD(&inode->i_sb_list);
        }
        return inode;
}

/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
        struct inode *inode;

        spin_lock_prefetch(&sb->s_inode_list_lock);

        inode = new_inode_pseudo(sb);
        if (inode)
                inode_sb_list_add(inode);
        return inode;
}
EXPORT_SYMBOL(new_inode);
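
/*
 * Usage sketch (illustrative): how an in-memory filesystem typically
 * creates an inode with new_inode() plus get_next_ino(), roughly the
 * ramfs-style pattern.  "myfs_get_inode" is a hypothetical helper.
 *
 *      struct inode *myfs_get_inode(struct super_block *sb, umode_t mode)
 *      {
 *              struct inode *inode = new_inode(sb);
 *
 *              if (inode) {
 *                      inode->i_ino = get_next_ino();
 *                      inode_init_owner(inode, NULL, mode);
 *                      inode->i_atime = inode->i_mtime = inode->i_ctime =
 *                                                      current_time(inode);
 *              }
 *              return inode;
 *      }
 */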
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
        if (S_ISDIR(inode->i_mode)) {
                struct file_system_type *type = inode->i_sb->s_type;

                /* Set new key only if filesystem hasn't already changed it */
                if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
                        /*
                         * ensure nobody is actually holding i_mutex
                         */
                        /* mutex_destroy(&inode->i_mutex); */
                        init_rwsem(&inode->i_rwsem);
                        lockdep_set_class(&inode->i_rwsem,
                                          &type->i_mutex_dir_key);
                }
        }
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode: new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
        lockdep_annotate_inode_mutex_key(inode);
        spin_lock(&inode->i_lock);
        WARN_ON(!(inode->i_state & I_NEW));
        inode->i_state &= ~I_NEW;
        smp_mb();
        wake_up_bit(&inode->i_state, __I_NEW);
        spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument that is not a directory.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
        if (inode1 > inode2)
                swap(inode1, inode2);

        if (inode1 && !S_ISDIR(inode1->i_mode))
                inode_lock(inode1);
        if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
                inode_lock_nested(inode2, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
        if (inode1 && !S_ISDIR(inode1->i_mode))
                inode_unlock(inode1);
        if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
                inode_unlock(inode2);
}
EXPORT_SYMBOL(unlock_two_nondirectories);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @hashval: hash value (usually inode number) to get
 * @test: callback used for comparisons between inodes
 * @set: callback used to initialize a new struct inode
 * @data: opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count.  This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so
 * they can't sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
                int (*test)(struct inode *, void *),
                int (*set)(struct inode *, void *), void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
        struct inode *inode;
again:
        spin_lock(&inode_hash_lock);
        inode = find_inode(sb, head, test, data);
        spin_unlock(&inode_hash_lock);

        if (inode) {
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
                return inode;
        }

        inode = alloc_inode(sb);
        if (inode) {
                struct inode *old;

                spin_lock(&inode_hash_lock);
                /* We released the lock, so.. */
                old = find_inode(sb, head, test, data);
                if (!old) {
                        if (set(inode, data))
                                goto set_failed;

                        spin_lock(&inode->i_lock);
                        inode->i_state = I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        inode_sb_list_add(inode);
                        spin_unlock(&inode_hash_lock);

                        /* Return the locked inode with I_NEW set, the
                         * caller is responsible for filling in the contents
                         */
                        return inode;
                }

                /*
                 * Uhhuh, somebody else created the same inode under
                 * us. Use the old inode instead of the one we just
                 * allocated.
                 */
                spin_unlock(&inode_hash_lock);
                destroy_inode(inode);
                inode = old;
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
        }
        return inode;

set_failed:
        spin_unlock(&inode_hash_lock);
        destroy_inode(inode);
        return NULL;
}
EXPORT_SYMBOL(iget5_locked);
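
/*
 * Usage sketch (illustrative): a filesystem whose inodes are keyed by
 * more than the inode number supplies @test/@set callbacks.  Both run
 * under inode_hash_lock, so they only compare and assign.  All "myfs"
 * names, including the MYFS_I() accessor and object_id key, are
 * hypothetical.
 *
 *      static int myfs_test(struct inode *inode, void *data)
 *      {
 *              return MYFS_I(inode)->object_id == *(u64 *)data;
 *      }
 *
 *      static int myfs_set(struct inode *inode, void *data)
 *      {
 *              MYFS_I(inode)->object_id = *(u64 *)data;
 *              return 0;
 *      }
 *
 *      inode = iget5_locked(sb, (unsigned long)object_id,
 *                           myfs_test, myfs_set, &object_id);
 *      if (inode && (inode->i_state & I_NEW)) {
 *              ...                             // read the object in
 *              unlock_new_inode(inode);
 *      }
 */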
/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @ino: inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *head = inode_hashtable + hash(sb, ino);
        struct inode *inode;
again:
        spin_lock(&inode_hash_lock);
        inode = find_inode_fast(sb, head, ino);
        spin_unlock(&inode_hash_lock);
        if (inode) {
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
                return inode;
        }

        inode = alloc_inode(sb);
        if (inode) {
                struct inode *old;

                spin_lock(&inode_hash_lock);
                /* We released the lock, so.. */
                old = find_inode_fast(sb, head, ino);
                if (!old) {
                        inode->i_ino = ino;
                        spin_lock(&inode->i_lock);
                        inode->i_state = I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        inode_sb_list_add(inode);
                        spin_unlock(&inode_hash_lock);

                        /* Return the locked inode with I_NEW set, the
                         * caller is responsible for filling in the contents
                         */
                        return inode;
                }

                /*
                 * Uhhuh, somebody else created the same inode under
                 * us. Use the old inode instead of the one we just
                 * allocated.
                 */
                spin_unlock(&inode_hash_lock);
                destroy_inode(inode);
                inode = old;
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
        }
        return inode;
}
EXPORT_SYMBOL(iget_locked);
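
/*
 * Usage sketch (illustrative): the canonical iget helper a disk
 * filesystem builds on iget_locked().  "myfs_read_inode" is a
 * hypothetical routine that fills the inode from disk; iget_failed()
 * is the standard bail-out for a half-constructed I_NEW inode.
 *
 *      struct inode *myfs_iget(struct super_block *sb, unsigned long ino)
 *      {
 *              struct inode *inode;
 *              int err;
 *
 *              inode = iget_locked(sb, ino);
 *              if (!inode)
 *                      return ERR_PTR(-ENOMEM);
 *              if (!(inode->i_state & I_NEW))
 *                      return inode;           // cache hit, already set up
 *
 *              err = myfs_read_inode(inode);   // hypothetical disk read
 *              if (err) {
 *                      iget_failed(inode);
 *                      return ERR_PTR(err);
 *              }
 *              unlock_new_inode(inode);
 *              return inode;
 *      }
 */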
/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *b = inode_hashtable + hash(sb, ino);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        hlist_for_each_entry(inode, b, i_hash) {
                if (inode->i_ino == ino && inode->i_sb == sb) {
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
        }
        spin_unlock(&inode_hash_lock);

        return 1;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
        /*
         * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
         * error if st_ino won't fit in target struct field. Use 32bit counter
         * here to attempt to avoid that.
         */
        static DEFINE_SPINLOCK(iunique_lock);
        static unsigned int counter;
        ino_t res;

        spin_lock(&iunique_lock);
        do {
                if (counter <= max_reserved)
                        counter = max_reserved + 1;
                res = counter++;
        } while (!test_inode_iunique(sb, res));
        spin_unlock(&iunique_lock);

        return res;
}
EXPORT_SYMBOL(iunique);
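
/*
 * Usage sketch (illustrative): filesystems with no stable on-disk inode
 * numbers pair iunique() with insert_inode_hash().  MYFS_ROOT_INO is a
 * hypothetical reserved-number ceiling.
 *
 *      inode->i_ino = iunique(sb, MYFS_ROOT_INO);
 *      insert_inode_hash(inode);
 */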
struct inode *igrab(struct inode *inode)
{
        spin_lock(&inode->i_lock);
        if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
                __iget(inode);
                spin_unlock(&inode->i_lock);
        } else {
                spin_unlock(&inode->i_lock);
                /*
                 * Handle the case where s_op->clear_inode has not been
                 * called yet, and somebody is calling igrab
                 * while the inode is getting freed.
                 */
                inode = NULL;
        }
        return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode.  You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
                int (*test)(struct inode *, void *), void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        inode = find_inode(sb, head, test, data);
        spin_unlock(&inode_hash_lock);

        return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count.  Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
                int (*test)(struct inode *, void *), void *data)
{
        struct inode *inode;
again:
        inode = ilookup5_nowait(sb, hashval, test, data);
        if (inode) {
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
        }
        return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @ino: inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *head = inode_hashtable + hash(sb, ino);
        struct inode *inode;
again:
        spin_lock(&inode_hash_lock);
        inode = find_inode_fast(sb, head, ino);
        spin_unlock(&inode_hash_lock);

        if (inode) {
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
        }
        return inode;
}
EXPORT_SYMBOL(ilookup);
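
/*
 * Usage sketch (illustrative): ilookup() only queries the cache and never
 * allocates, so it suits "is this inode already in core?" checks.
 *
 *      struct inode *inode = ilookup(sb, ino);
 *      if (inode) {
 *              ...                     // inode is referenced and set up
 *              iput(inode);
 *      }
 */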
/**
 * find_inode_nowait - find an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @match: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @match
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, where the helper function @match will return 0 if the inode
 * does not match, 1 if the inode does match, and -1 if the search
 * should be stopped.  The @match function must be responsible for
 * taking the i_lock spin_lock and checking i_state for an inode being
 * freed or being initialized, and incrementing the reference count
 * before returning 1.  It also must not sleep, since it is called with
 * the inode_hash_lock spinlock held.
 *
 * This is an even more generalized version of ilookup5() when the
 * function must never block --- find_inode() can block in
 * __wait_on_freeing_inode() --- or when the caller can not increment
 * the reference count because the resulting iput() might cause an
 * inode eviction.  The tradeoff is that the @match function must be
 * very carefully implemented.
 */
struct inode *find_inode_nowait(struct super_block *sb,
                                unsigned long hashval,
                                int (*match)(struct inode *, unsigned long,
                                             void *),
                                void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
        struct inode *inode, *ret_inode = NULL;
        int mval;

        spin_lock(&inode_hash_lock);
        hlist_for_each_entry(inode, head, i_hash) {
                if (inode->i_sb != sb)
                        continue;
                mval = match(inode, hashval, data);
                if (mval == 0)
                        continue;
                if (mval == 1)
                        ret_inode = inode;
                goto out;
        }
out:
        spin_unlock(&inode_hash_lock);
        return ret_inode;
}
EXPORT_SYMBOL(find_inode_nowait);

int insert_inode_locked(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        ino_t ino = inode->i_ino;
        struct hlist_head *head = inode_hashtable + hash(sb, ino);

        while (1) {
                struct inode *old = NULL;
                spin_lock(&inode_hash_lock);
                hlist_for_each_entry(old, head, i_hash) {
                        if (old->i_ino != ino)
                                continue;
                        if (old->i_sb != sb)
                                continue;
                        spin_lock(&old->i_lock);
                        if (old->i_state & (I_FREEING|I_WILL_FREE)) {
                                spin_unlock(&old->i_lock);
                                continue;
                        }
                        break;
                }
                if (likely(!old)) {
                        spin_lock(&inode->i_lock);
                        inode->i_state |= I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
                __iget(old);
                spin_unlock(&old->i_lock);
                spin_unlock(&inode_hash_lock);
                wait_on_inode(old);
                if (unlikely(!inode_unhashed(old))) {
                        iput(old);
                        return -EBUSY;
                }
                iput(old);
        }
}
EXPORT_SYMBOL(insert_inode_locked);

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
                int (*test)(struct inode *, void *), void *data)
{
        struct super_block *sb = inode->i_sb;
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);

        while (1) {
                struct inode *old = NULL;

                spin_lock(&inode_hash_lock);
                hlist_for_each_entry(old, head, i_hash) {
                        if (old->i_sb != sb)
                                continue;
                        if (!test(old, data))
                                continue;
                        spin_lock(&old->i_lock);
                        if (old->i_state & (I_FREEING|I_WILL_FREE)) {
                                spin_unlock(&old->i_lock);
                                continue;
                        }
                        break;
                }
                if (likely(!old)) {
                        spin_lock(&inode->i_lock);
                        inode->i_state |= I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
                __iget(old);
                spin_unlock(&old->i_lock);
                spin_unlock(&inode_hash_lock);
                wait_on_inode(old);
                if (unlikely(!inode_unhashed(old))) {
                        iput(old);
                        return -EBUSY;
                }
                iput(old);
        }
}
EXPORT_SYMBOL(insert_inode_locked4);

int generic_delete_inode(struct inode *inode)
{
        return 1;
}
EXPORT_SYMBOL(generic_delete_inode);

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour.  If it tells
 * us to evict inode, do so.  Otherwise, retain inode
 * in cache if fs is alive, sync and evict if fs is
 * shutting down.
 */
static void iput_final(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        const struct super_operations *op = inode->i_sb->s_op;
        int drop;

        WARN_ON(inode->i_state & I_NEW);

        if (op->drop_inode)
                drop = op->drop_inode(inode);
        else
                drop = generic_drop_inode(inode);

        if (!drop && (sb->s_flags & MS_ACTIVE)) {
                inode_add_lru(inode);
                spin_unlock(&inode->i_lock);
                return;
        }

        if (!drop) {
                inode->i_state |= I_WILL_FREE;
                spin_unlock(&inode->i_lock);
                write_inode_now(inode, 1);
                spin_lock(&inode->i_lock);
                WARN_ON(inode->i_state & I_NEW);
                inode->i_state &= ~I_WILL_FREE;
        }

        inode->i_state |= I_FREEING;
        if (!list_empty(&inode->i_lru))
                inode_lru_list_del(inode);
        spin_unlock(&inode->i_lock);

        evict(inode);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
        if (!inode)
                return;
        BUG_ON(inode->i_state & I_CLEAR);
retry:
        if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
                if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
                        atomic_inc(&inode->i_count);
                        inode->i_state &= ~I_DIRTY_TIME;
                        spin_unlock(&inode->i_lock);
                        trace_writeback_lazytime_iput(inode);
                        mark_inode_dirty_sync(inode);
                        goto retry;
                }
                iput_final(inode);
        }
}
EXPORT_SYMBOL(iput);
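
/*
 * Usage sketch (illustrative): a filesystem that never wants unreferenced
 * inodes cached points ->drop_inode at generic_delete_inode(), making
 * iput_final() above evict immediately.  "myfs_sops" is hypothetical.
 *
 *      static const struct super_operations myfs_sops = {
 *              .statfs         = simple_statfs,
 *              .drop_inode     = generic_delete_inode,
 *      };
 */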
/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1 the function will return the
 * disk block relative to the disk start that holds that block of the
 * file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
        sector_t res = 0;
        if (inode->i_mapping->a_ops->bmap)
                res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
        return res;
}
EXPORT_SYMBOL(bmap);
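
/*
 * Usage sketch (illustrative): this is essentially what the FIBMAP ioctl
 * does - map a file-relative block to a device-relative block, with 0
 * meaning "no mapping" (a hole, or no ->bmap method).
 *
 *      sector_t phys = bmap(inode, 4);
 *      if (phys == 0)
 *              ;       // hole, or the filesystem does not support bmap
 */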
/*
 * Update times in overlayed inode from underlying real inode
 */
static void update_ovl_inode_times(struct dentry *dentry, struct inode *inode,
                                   bool rcu)
{
        if (!rcu) {
                struct inode *realinode = d_real_inode(dentry);

                if (unlikely(inode != realinode) &&
                    (!timespec_equal(&inode->i_mtime, &realinode->i_mtime) ||
                     !timespec_equal(&inode->i_ctime, &realinode->i_ctime))) {
                        inode->i_mtime = realinode->i_mtime;
                        inode->i_ctime = realinode->i_ctime;
                }
        }
}

/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(const struct path *path, struct inode *inode,
                                struct timespec now, bool rcu)
{
        if (!(path->mnt->mnt_flags & MNT_RELATIME))
                return 1;

        update_ovl_inode_times(path->dentry, inode, rcu);
        /*
         * Is mtime younger than atime? If yes, update atime:
         */
        if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
                return 1;
        /*
         * Is ctime younger than atime? If yes, update atime:
         */
        if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
                return 1;

        /*
         * Is the previous atime value older than a day? If yes,
         * update atime:
         */
        if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
                return 1;
        /*
         * Good, we can skip the atime update:
         */
        return 0;
}

int generic_update_time(struct inode *inode, struct timespec *time, int flags)
{
        int iflags = I_DIRTY_TIME;

        if (flags & S_ATIME)
                inode->i_atime = *time;
        if (flags & S_VERSION)
                inode_inc_iversion(inode);
        if (flags & S_CTIME)
                inode->i_ctime = *time;
        if (flags & S_MTIME)
                inode->i_mtime = *time;

        if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION))
                iflags |= I_DIRTY_SYNC;
        __mark_inode_dirty(inode, iflags);
        return 0;
}
EXPORT_SYMBOL(generic_update_time);

/*
 * This does the actual work of updating an inode's time or version.  The
 * caller must have called mnt_want_write() before calling this.
 */
static int update_time(struct inode *inode, struct timespec *time, int flags)
{
        int (*update_time)(struct inode *, struct timespec *, int);

        update_time = inode->i_op->update_time ? inode->i_op->update_time :
                                                 generic_update_time;

        return update_time(inode, time, flags);
}

/**
 * touch_atime - update the access time
 * @path: the &struct path to update
 * @inode: inode to update
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
bool __atime_needs_update(const struct path *path, struct inode *inode,
                          bool rcu)
{
        struct vfsmount *mnt = path->mnt;
        struct timespec now;

        if (inode->i_flags & S_NOATIME)
                return false;

        /* Atime updates will likely cause i_uid and i_gid to be written
         * back improperly if their true value is unknown to the vfs.
         */
        if (HAS_UNMAPPED_ID(inode))
                return false;

        if (IS_NOATIME(inode))
                return false;
        if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
                return false;

        if (mnt->mnt_flags & MNT_NOATIME)
                return false;
        if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
                return false;

        now = current_time(inode);

        if (!relatime_need_update(path, inode, now, rcu))
                return false;

        if (timespec_equal(&inode->i_atime, &now))
                return false;

        return true;
}

void touch_atime(const struct path *path)
{
        struct vfsmount *mnt = path->mnt;
        struct inode *inode = d_inode(path->dentry);
        struct timespec now;

        if (!__atime_needs_update(path, inode, false))
                return;

        if (!sb_start_write_trylock(inode->i_sb))
                return;

        if (__mnt_want_write(mnt) != 0)
                goto skip_update;
        /*
         * File systems can error out when updating inodes if they need to
         * allocate new space to modify an inode (such is the case for
         * Btrfs), but since we touch atime while walking down the path we
         * really don't care if we failed to update the atime of the file,
         * so just ignore the return value.
         * We may also fail on filesystems that have the ability to make parts
         * of the fs read only, e.g. subvolumes in Btrfs.
         */
        now = current_time(inode);
        update_time(inode, &now, S_ATIME);
        __mnt_drop_write(mnt);
skip_update:
        sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);
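
/*
 * Usage sketch (illustrative): read paths normally reach touch_atime()
 * through the file_accessed() helper in <linux/fs.h>, which is roughly:
 *
 *      static inline void file_accessed(struct file *file)
 *      {
 *              if (!(file->f_flags & O_NOATIME))
 *                      touch_atime(&file->f_path);
 *      }
 */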
/*
 * The logic we want is
 *
 *      if suid or (sgid and xgrp)
 *              remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
        umode_t mode = d_inode(dentry)->i_mode;
        int kill = 0;

        /* suid always must be killed */
        if (unlikely(mode & S_ISUID))
                kill = ATTR_KILL_SUID;

        /*
         * sgid without any exec bits is just a mandatory locking mark; leave
         * it alone.  If some exec bits are set, it's a real sgid; kill it.
         */
        if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
                kill |= ATTR_KILL_SGID;

        if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
                return kill;

        return 0;
}
EXPORT_SYMBOL(should_remove_suid);

/*
 * Return mask of changes for notify_change() that need to be done as a
 * response to write or truncate.  Return 0 if nothing has to be changed.
 * Negative value on error (change should be denied).
 */
int dentry_needs_remove_privs(struct dentry *dentry)
{
        struct inode *inode = d_inode(dentry);
        int mask = 0;
        int ret;

        if (IS_NOSEC(inode))
                return 0;

        mask = should_remove_suid(dentry);
        ret = security_inode_need_killpriv(dentry);
        if (ret < 0)
                return ret;
        if (ret)
                mask |= ATTR_KILL_PRIV;
        return mask;
}

static int __remove_privs(struct dentry *dentry, int kill)
{
        struct iattr newattrs;

        newattrs.ia_valid = ATTR_FORCE | kill;
        /*
         * Note we call this on write, so notify_change will not
         * encounter any conflicting delegations:
         */
        return notify_change(dentry, &newattrs, NULL);
}

/*
 * Remove special file privileges (suid, capabilities) when file is written
 * to or truncated.
 */
int file_remove_privs(struct file *file)
{
        struct dentry *dentry = file_dentry(file);
        struct inode *inode = file_inode(file);
        int kill;
        int error = 0;

        /* Fast path for nothing security related */
        if (IS_NOSEC(inode))
                return 0;

        kill = dentry_needs_remove_privs(dentry);
        if (kill < 0)
                return kill;
        if (kill)
                error = __remove_privs(dentry, kill);
        if (!error)
                inode_has_no_xattr(inode);

        return error;
}
EXPORT_SYMBOL(file_remove_privs);

/**
 * file_update_time - update mtime and ctime
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode for
 * writeback.  Note that this function is meant exclusively for usage in
 * the file write path of filesystems, and filesystems may choose to
 * explicitly ignore updates via this function with the S_NOCMTIME inode
 * flag, e.g. for network filesystems where these timestamps are handled
 * by the server.  This can return an error for file systems that need to
 * allocate space in order to update an inode.
 */
int file_update_time(struct file *file)
{
        struct inode *inode = file_inode(file);
        struct timespec now;
        int sync_it = 0;
        int ret;

        /* First try to exhaust all avenues to not sync */
        if (IS_NOCMTIME(inode))
                return 0;

        now = current_time(inode);
        if (!timespec_equal(&inode->i_mtime, &now))
                sync_it = S_MTIME;

        if (!timespec_equal(&inode->i_ctime, &now))
                sync_it |= S_CTIME;

        if (IS_I_VERSION(inode))
                sync_it |= S_VERSION;

        if (!sync_it)
                return 0;

        /* Finally allowed to write? Takes lock. */
        if (__mnt_want_write_file(file))
                return 0;

        ret = update_time(inode, &now, sync_it);
        __mnt_drop_write_file(file);

        return ret;
}
EXPORT_SYMBOL(file_update_time);
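
/*
 * Usage sketch (illustrative): a write path typically strips privileges
 * and bumps the timestamps before copying data, much as the generic
 * write code does.
 *
 *      inode_lock(inode);
 *      err = file_remove_privs(file);          // kill suid/sgid/caps
 *      if (!err)
 *              err = file_update_time(file);   // mtime/ctime/version
 *      if (!err)
 *              ...                             // actually write the data
 *      inode_unlock(inode);
 */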
int inode_needs_sync(struct inode *inode)
{
        if (IS_SYNC(inode))
                return 1;
        if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
                return 1;
        return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * for rechecking inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
        wait_queue_head_t *wq;
        DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
        wq = bit_waitqueue(&inode->i_state, __I_NEW);
        prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
        schedule();
        finish_wait(wq, &wait.wq_entry);
        spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
        if (!str)
                return 0;
        ihash_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
        /* If hashes are distributed across NUMA nodes, defer
         * hash allocation until vmalloc space is available.
         */
        if (hashdist)
                return;

        inode_hashtable =
                alloc_large_system_hash("Inode-cache",
                                        sizeof(struct hlist_head),
                                        ihash_entries,
                                        14,
                                        HASH_EARLY | HASH_ZERO,
                                        &i_hash_shift,
                                        &i_hash_mask,
                                        0,
                                        0);
}

void __init inode_init(void)
{
        /* inode slab cache */
        inode_cachep = kmem_cache_create("inode_cache",
                                         sizeof(struct inode),
                                         0,
                                         (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
                                          SLAB_MEM_SPREAD|SLAB_ACCOUNT),
                                         init_once);

        /* Hash may have been set up in inode_init_early */
        if (!hashdist)
                return;

        inode_hashtable =
                alloc_large_system_hash("Inode-cache",
                                        sizeof(struct hlist_head),
                                        ihash_entries,
                                        14,
                                        HASH_ZERO,
                                        &i_hash_shift,
                                        &i_hash_mask,
                                        0,
                                        0);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
        inode->i_mode = mode;
        if (S_ISCHR(mode)) {
                inode->i_fop = &def_chr_fops;
                inode->i_rdev = rdev;
        } else if (S_ISBLK(mode)) {
                inode->i_fop = &def_blk_fops;
                inode->i_rdev = rdev;
        } else if (S_ISFIFO(mode))
                inode->i_fop = &pipefifo_fops;
        else if (S_ISSOCK(mode))
                ;       /* leave it no_open_fops */
        else
                printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
                                  " inode %s:%lu\n", mode, inode->i_sb->s_id,
                                  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);

/**
 * inode_init_owner - Init uid,gid,mode for new inode according to POSIX standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
                      umode_t mode)
{
        inode->i_uid = current_fsuid();
        if (dir && dir->i_mode & S_ISGID) {
                inode->i_gid = dir->i_gid;
                if (S_ISDIR(mode))
                        mode |= S_ISGID;
        } else
                inode->i_gid = current_fsgid();
        inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
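
/*
 * Usage sketch (illustrative): a ->create() or ->mkdir() implementation
 * hands the parent directory to inode_init_owner() so setgid-directory
 * semantics (group inheritance, S_ISGID propagation for directories)
 * come out right.  "myfs" names are hypothetical.
 *
 *      static int myfs_mkdir(struct inode *dir, struct dentry *dentry,
 *                            umode_t mode)
 *      {
 *              struct inode *inode = new_inode(dir->i_sb);
 *
 *              if (!inode)
 *                      return -ENOMEM;
 *              inode->i_ino = get_next_ino();
 *              inode_init_owner(inode, dir, S_IFDIR | mode);
 *              ...
 *              d_instantiate(dentry, inode);
 *              return 0;
 *      }
 */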
/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in a namespace with the
 * inode owner uid mapped, or owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
        struct user_namespace *ns;

        if (uid_eq(current_fsuid(), inode->i_uid))
                return true;

        ns = current_user_ns();
        if (kuid_has_mapping(ns, inode->i_uid) && ns_capable(ns, CAP_FOWNER))
                return true;
        return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);

/*
 * Direct i/o helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
        wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
        DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

        do {
                prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
                if (atomic_read(&inode->i_dio_count))
                        schedule();
        } while (atomic_read(&inode->i_dio_count));
        finish_wait(wq, &q.wq_entry);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
        if (atomic_read(&inode->i_dio_count))
                __inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);

/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated).  The reason for the cmpxchg() loop
 * --- which wouldn't be necessary if all code paths which modify
 * i_flags actually followed this rule --- is that there is at least
 * one code path which doesn't today, so we use cmpxchg() out of an
 * abundance of caution.
 *
 * In the long run, i_mutex is overkill, and we should probably look
 * at using the i_lock spinlock to protect i_flags, and then make sure
 * it is so documented in include/linux/fs.h and that all code follows
 * the locking convention!!
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
                     unsigned int mask)
{
        unsigned int old_flags, new_flags;

        WARN_ON_ONCE(flags & ~mask);
        do {
                old_flags = ACCESS_ONCE(inode->i_flags);
                new_flags = (old_flags & ~mask) | flags;
        } while (unlikely(cmpxchg(&inode->i_flags, old_flags,
                                  new_flags) != old_flags));
}
EXPORT_SYMBOL(inode_set_flags);

void inode_nohighmem(struct inode *inode)
{
        mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
}
EXPORT_SYMBOL(inode_nohighmem);

/**
 * current_time - Return FS time
 * @inode: inode.
 *
 * Return the current time truncated to the time granularity supported by
 * the fs.
 *
 * Note that inode and inode->sb cannot be NULL.
 * Otherwise, the function warns and returns time without truncation.
 */
struct timespec current_time(struct inode *inode)
{
        struct timespec now = current_kernel_time();

        if (unlikely(!inode->i_sb)) {
                WARN(1, "current_time() called with uninitialized super_block in the inode");
                return now;
        }

        return timespec_trunc(now, inode->i_sb->s_time_gran);
}
EXPORT_SYMBOL(current_time);