// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/memblock.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <linux/iversion.h>
#include <linux/rw_hint.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <trace/events/writeback.h>
#define CREATE_TRACE_POINTS
#include <trace/events/timestamp.h>

#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget(), inode->i_io_list
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode->i_sb->s_inode_list_lock protects:
 *   inode->i_sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */

static unsigned int i_hash_mask __ro_after_init;
static unsigned int i_hash_shift __ro_after_init;
static struct hlist_head *inode_hashtable __ro_after_init;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __ro_after_init;

static long get_nr_inodes(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}
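
/*
 * Illustrative note (addition, not from the original source): nr_inodes
 * and nr_unused are per-cpu deltas, not totals. A CPU that frees an
 * inode allocated on another CPU drives its local counter below zero
 * (it wraps, being unsigned long), so only the signed sum over all CPUs
 * is meaningful, and even that can transiently dip below zero while
 * updates are in flight; hence the "sum < 0 ? 0 : sum" clamp above.
 * Callers treat the result as an estimate only:
 *
 *	long approx_dirty = get_nr_dirty_inodes();	// never negative
 */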

#ifdef CONFIG_DEBUG_FS
static DEFINE_PER_CPU(long, mg_ctime_updates);
static DEFINE_PER_CPU(long, mg_fine_stamps);
static DEFINE_PER_CPU(long, mg_ctime_swaps);

static unsigned long get_mg_ctime_updates(void)
{
	unsigned long sum = 0;
	int i;

	for_each_possible_cpu(i)
		sum += data_race(per_cpu(mg_ctime_updates, i));
	return sum;
}

static unsigned long get_mg_fine_stamps(void)
{
	unsigned long sum = 0;
	int i;

	for_each_possible_cpu(i)
		sum += data_race(per_cpu(mg_fine_stamps, i));
	return sum;
}

static unsigned long get_mg_ctime_swaps(void)
{
	unsigned long sum = 0;
	int i;

	for_each_possible_cpu(i)
		sum += data_race(per_cpu(mg_ctime_swaps, i));
	return sum;
}

#define mgtime_counter_inc(__var)	this_cpu_inc(__var)

static int mgts_show(struct seq_file *s, void *p)
{
	unsigned long ctime_updates = get_mg_ctime_updates();
	unsigned long ctime_swaps = get_mg_ctime_swaps();
	unsigned long fine_stamps = get_mg_fine_stamps();
	unsigned long floor_swaps = timekeeping_get_mg_floor_swaps();

	seq_printf(s, "%lu %lu %lu %lu\n",
		   ctime_updates, ctime_swaps, fine_stamps, floor_swaps);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mgts);

static int __init mg_debugfs_init(void)
{
	debugfs_create_file("multigrain_timestamps", S_IFREG | S_IRUGO, NULL, NULL, &mgts_fops);
	return 0;
}
late_initcall(mg_debugfs_init);

#else /* ! CONFIG_DEBUG_FS */

#define mgtime_counter_inc(__var)	do { } while (0)

#endif /* CONFIG_DEBUG_FS */

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
/*
 * Statistics gathering..
 */
static struct inodes_stat_t inodes_stat;

static int proc_nr_inodes(const struct ctl_table *table, int write, void *buffer,
			  size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}

static struct ctl_table inodes_sysctls[] = {
	{
		.procname	= "inode-nr",
		.data		= &inodes_stat,
		.maxlen		= 2*sizeof(long),
		.mode		= 0444,
		.proc_handler	= proc_nr_inodes,
	},
	{
		.procname	= "inode-state",
		.data		= &inodes_stat,
		.maxlen		= 7*sizeof(long),
		.mode		= 0444,
		.proc_handler	= proc_nr_inodes,
	},
};

static int __init init_fs_inode_sysctls(void)
{
	register_sysctl_init("fs", inodes_sysctls);
	return 0;
}
early_initcall(init_fs_inode_sysctls);
#endif

static int no_open(struct inode *inode, struct file *file)
{
	return -ENXIO;
}
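
/*
 * Illustrative note (addition, not from the original source): the
 * "inode-nr" entry above exports the first two fields of inodes_stat,
 * so a read returns nr_inodes and nr_unused as two tab-separated
 * longs (the values below are made up):
 *
 *	$ cat /proc/sys/fs/inode-nr
 *	24103	356
 */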

/**
 * inode_init_always_gfp - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 * @gfp: allocation flags
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 * If there are additional allocations required @gfp is used.
 */
int inode_init_always_gfp(struct super_block *sb, struct inode *inode, gfp_t gfp)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations no_open_fops = {.open = no_open};
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	inode->i_state = 0;
	atomic64_set(&inode->i_sequence, 0);
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &no_open_fops;
	inode->i_ino = 0;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	if (sb->s_xattr)
		inode->i_opflags |= IOP_XATTR;
	if (sb->s_type->fs_flags & FS_MGTIME)
		inode->i_opflags |= IOP_MGTIME;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_write_hint = WRITE_LIFE_NOT_SET;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
	inode->i_pipe = NULL;
	inode->i_cdev = NULL;
	inode->i_link = NULL;
	inode->i_dir_seq = 0;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

#ifdef CONFIG_CGROUP_WRITEBACK
	inode->i_wb_frn_winner = 0;
	inode->i_wb_frn_avg_time = 0;
	inode->i_wb_frn_history = 0;
#endif

	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	init_rwsem(&inode->i_rwsem);
	lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	mapping->wb_err = 0;
	atomic_set(&mapping->i_mmap_writable, 0);
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	atomic_set(&mapping->nr_thps, 0);
#endif
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->i_private_data = NULL;
	mapping->writeback_index = 0;
	init_rwsem(&mapping->invalidate_lock);
	lockdep_set_class_and_name(&mapping->invalidate_lock,
				   &sb->s_type->invalidate_lock_key,
				   "mapping.invalidate_lock");
	if (sb->s_iflags & SB_I_STABLE_WRITES)
		mapping_set_stable_writes(mapping);
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif
	inode->i_flctx = NULL;

	if (unlikely(security_inode_alloc(inode, gfp)))
		return -ENOMEM;

	this_cpu_inc(nr_inodes);

	return 0;
}
EXPORT_SYMBOL(inode_init_always_gfp);

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	if (inode->free_inode)
		inode->free_inode(inode);
	else
		free_inode_nonrcu(inode);
}

static struct inode *alloc_inode(struct super_block *sb)
{
	const struct super_operations *ops = sb->s_op;
	struct inode *inode;

	if (ops->alloc_inode)
		inode = ops->alloc_inode(sb);
	else
		inode = alloc_inode_sb(sb, inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (ops->destroy_inode) {
			ops->destroy_inode(inode);
			if (!ops->free_inode)
				return NULL;
		}
		inode->free_inode = ops->free_inode;
		i_callback(&inode->i_rcu);
		return NULL;
	}

	return inode;
}
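
/*
 * Illustrative sketch (addition, not from the original source): the
 * ops->alloc_inode/->free_inode/->destroy_inode protocol used above.
 * A filesystem that embeds struct inode in its own type typically does
 * (all "foofs" names are hypothetical):
 *
 *	static struct inode *foofs_alloc_inode(struct super_block *sb)
 *	{
 *		struct foofs_inode_info *fi;
 *
 *		fi = alloc_inode_sb(sb, foofs_inode_cachep, GFP_KERNEL);
 *		if (!fi)
 *			return NULL;
 *		return &fi->vfs_inode;
 *	}
 *
 *	static void foofs_free_inode(struct inode *inode)
 *	{
 *		kmem_cache_free(foofs_inode_cachep, FOOFS_I(inode));
 *	}
 *
 * ->free_inode runs from the RCU callback (see i_callback() above),
 * while ->destroy_inode, if provided, runs synchronously first.
 */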

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	inode_detach_wb(inode);
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	locks_free_lock_context(inode);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && !is_uncached_acl(inode->i_acl))
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void destroy_inode(struct inode *inode)
{
	const struct super_operations *ops = inode->i_sb->s_op;

	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (ops->destroy_inode) {
		ops->destroy_inode(inode);
		if (!ops->free_inode)
			return;
	}
	inode->free_inode = ops->free_inode;
	call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);
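
/*
 * Illustrative sketch (addition, not from the original source): how a
 * filesystem typically drives these helpers instead of touching
 * i_nlink directly ("foofs" is hypothetical):
 *
 *	static int foofs_unlink(struct inode *dir, struct dentry *dentry)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *
 *		// ... remove the directory entry on disk ...
 *		drop_nlink(inode);		// may push i_nlink to 0
 *		mark_inode_dirty(inode);
 *		return 0;
 *	}
 *
 * A hard-link creation pairs inc_nlink() with the new entry, while
 * loading an inode from disk uses set_nlink(inode, raw->nlink).
 */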

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);

static void __address_space_init_once(struct address_space *mapping)
{
	xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
	init_rwsem(&mapping->i_mmap_rwsem);
	INIT_LIST_HEAD(&mapping->i_private_list);
	spin_lock_init(&mapping->i_private_lock);
	mapping->i_mmap = RB_ROOT_CACHED;
}

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	__address_space_init_once(mapping);
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so we let the slab be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_io_list);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	INIT_LIST_HEAD(&inode->i_sb_list);
	__address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void __inode_add_lru(struct inode *inode, bool rotate)
{
	if (inode->i_state & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
		return;
	if (atomic_read(&inode->i_count))
		return;
	if (!(inode->i_sb->s_flags & SB_ACTIVE))
		return;
	if (!mapping_shrinkable(&inode->i_data))
		return;

	if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_inc(nr_unused);
	else if (rotate)
		inode->i_state |= I_REFERENCED;
}

struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
					    struct inode *inode, u32 bit)
{
	void *bit_address;

	bit_address = inode_state_wait_address(inode, bit);
	init_wait_var_entry(wqe, bit_address, 0);
	return __var_waitqueue(bit_address);
}
EXPORT_SYMBOL(inode_bit_waitqueue);

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
	__inode_add_lru(inode, false);
}

static void inode_lru_list_del(struct inode *inode)
{
	if (list_lru_del_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_dec(nr_unused);
}

static void inode_pin_lru_isolating(struct inode *inode)
{
	lockdep_assert_held(&inode->i_lock);
	WARN_ON(inode->i_state & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE));
	inode->i_state |= I_LRU_ISOLATING;
}

static void inode_unpin_lru_isolating(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_LRU_ISOLATING));
	inode->i_state &= ~I_LRU_ISOLATING;
	/* Called with inode->i_lock which ensures memory ordering. */
	inode_wake_up_bit(inode, __I_LRU_ISOLATING);
	spin_unlock(&inode->i_lock);
}

static void inode_wait_for_lru_isolating(struct inode *inode)
{
	struct wait_bit_queue_entry wqe;
	struct wait_queue_head *wq_head;

	lockdep_assert_held(&inode->i_lock);
	if (!(inode->i_state & I_LRU_ISOLATING))
		return;

	wq_head = inode_bit_waitqueue(&wqe, inode, __I_LRU_ISOLATING);
	for (;;) {
		prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
		/*
		 * Checking I_LRU_ISOLATING with inode->i_lock guarantees
		 * memory ordering.
		 */
		if (!(inode->i_state & I_LRU_ISOLATING))
			break;
		spin_unlock(&inode->i_lock);
		schedule();
		spin_lock(&inode->i_lock);
	}
	finish_wait(wq_head, &wqe.wq_entry);
	WARN_ON(inode->i_state & I_LRU_ISOLATING);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode->i_sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode->i_sb->s_inode_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode->i_sb->s_inode_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *	inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head_rcu(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 * __remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init_rcu(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);
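
/*
 * Illustrative sketch (addition, not from the original source): @hashval
 * is commonly just the inode number, i.e. the usual pattern is
 *
 *	__insert_inode_hash(inode, inode->i_ino);
 *	...
 *	__remove_inode_hash(inode);	// e.g. when invalidating it early
 *
 * hash() above mixes the superblock pointer into the bucket index, so
 * equal inode numbers from different superblocks spread across buckets.
 */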

void dump_mapping(const struct address_space *mapping)
{
	struct inode *host;
	const struct address_space_operations *a_ops;
	struct hlist_node *dentry_first;
	struct dentry *dentry_ptr;
	struct dentry dentry;
	char fname[64] = {};
	unsigned long ino;

	/*
	 * If mapping is an invalid pointer, we don't want to crash
	 * accessing it, so probe everything depending on it carefully.
	 */
	if (get_kernel_nofault(host, &mapping->host) ||
	    get_kernel_nofault(a_ops, &mapping->a_ops)) {
		pr_warn("invalid mapping:%px\n", mapping);
		return;
	}

	if (!host) {
		pr_warn("aops:%ps\n", a_ops);
		return;
	}

	if (get_kernel_nofault(dentry_first, &host->i_dentry.first) ||
	    get_kernel_nofault(ino, &host->i_ino)) {
		pr_warn("aops:%ps invalid inode:%px\n", a_ops, host);
		return;
	}

	if (!dentry_first) {
		pr_warn("aops:%ps ino:%lx\n", a_ops, ino);
		return;
	}

	dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
	if (get_kernel_nofault(dentry, dentry_ptr) ||
	    !dentry.d_parent || !dentry.d_name.name) {
		pr_warn("aops:%ps ino:%lx invalid dentry:%px\n",
				a_ops, ino, dentry_ptr);
		return;
	}

	if (strncpy_from_kernel_nofault(fname, dentry.d_name.name, 63) < 0)
		strscpy(fname, "<invalid>");
	/*
	 * Even if strncpy_from_kernel_nofault() succeeded,
	 * the fname could be unreliable
	 */
	pr_warn("aops:%ps ino:%lx dentry name(?):\"%s\"\n",
			a_ops, ino, fname);
}

void clear_inode(struct inode *inode)
{
	/*
	 * We have to cycle the i_pages lock here because reclaim can be in the
	 * process of removing the last page (in __filemap_remove_folio())
	 * and we must not free the mapping under it.
	 */
	xa_lock_irq(&inode->i_data.i_pages);
	BUG_ON(inode->i_data.nrpages);
	/*
	 * Almost always, mapping_empty(&inode->i_data) here; but there are
	 * two known and long-standing ways in which nodes may get left behind
	 * (when deep radix-tree node allocation failed partway; or when THP
	 * collapse_file() failed). Until those two known cases are cleaned up,
	 * or a cleanup function is called here, do not BUG_ON(!mapping_empty),
	 * nor even WARN_ON(!mapping_empty).
	 */
	xa_unlock_irq(&inode->i_data.i_pages);
	BUG_ON(!list_empty(&inode->i_data.i_private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	BUG_ON(!list_empty(&inode->i_wb_list));
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);
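
/*
 * Illustrative sketch (addition, not from the original source): a
 * filesystem that implements ->evict_inode must end with the same
 * truncate + clear_inode() sequence that evict() below falls back to
 * when no ->evict_inode is provided ("foofs" is hypothetical):
 *
 *	static void foofs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		if (!inode->i_nlink && !is_bad_inode(inode))
 *			foofs_delete_on_disk(inode);	// hypothetical
 *		clear_inode(inode);
 *	}
 */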

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_io_list))
		inode_io_list_del(inode);

	inode_sb_list_del(inode);

	spin_lock(&inode->i_lock);
	inode_wait_for_lru_isolating(inode);

	/*
	 * Wait for flusher thread to be done with the inode so that filesystem
	 * does not start destroying it while writeback is still running. Since
	 * the inode has I_FREEING set, flusher thread won't start new work on
	 * the inode. We just have to wait for running writeback to finish.
	 */
	inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	/*
	 * Wake up waiters in __wait_on_freeing_inode().
	 *
	 * Lockless hash lookup may end up finding the inode before we removed
	 * it above, but only lock it *after* we are done with the wakeup below.
	 * In this case the potential waiter cannot safely block.
	 *
	 * The inode being unhashed after the call to remove_inode_hash() is
	 * used as an indicator whether blocking on it is safe.
	 */
	spin_lock(&inode->i_lock);
	/*
	 * Pairs with the barrier in prepare_to_wait_event() to make sure
	 * ___wait_var_event() either sees the bit cleared or
	 * waitqueue_active() check in wake_up_var() sees the waiter.
	 */
	smp_mb__after_spinlock();
	inode_wake_up_bit(inode, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
		cond_resched();
	}
}

/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb:		superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained. This is
 * called by superblock shutdown after having SB_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

again:
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);

		/*
		 * We can have a ton of inodes to evict at unmount time given
		 * enough memory, check to see if we need to go to sleep for a
		 * bit so we don't livelock.
		 */
		if (need_resched()) {
			spin_unlock(&sb->s_inode_list_lock);
			cond_resched();
			dispose_list(&dispose);
			goto again;
		}
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);
}
EXPORT_SYMBOL_GPL(evict_inodes);

/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb:		superblock to operate on
 *
 * Attempts to free all inodes (including dirty inodes) for a given superblock.
 */
void invalidate_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

again:
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
		if (need_resched()) {
			spin_unlock(&sb->s_inode_list_lock);
			cond_resched();
			dispose_list(&dispose);
			goto again;
		}
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);
}

/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static enum lru_status inode_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct inode	*inode = container_of(item, struct inode, i_lru);

	/*
	 * We are inverting the lru lock/inode->i_lock here, so use a
	 * trylock. If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	/*
	 * Inodes can get referenced, redirtied, or repopulated while
	 * they're already on the LRU, and this can make them
	 * unreclaimable for a while. Remove them lazily here; iput,
	 * sync, or the last page cache deletion will requeue them.
	 */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED) ||
	    !mapping_shrinkable(&inode->i_data)) {
		list_lru_isolate(lru, &inode->i_lru);
		spin_unlock(&inode->i_lock);
		this_cpu_dec(nr_unused);
		return LRU_REMOVED;
	}

	/* Recently referenced inodes get one more pass */
	if (inode->i_state & I_REFERENCED) {
		inode->i_state &= ~I_REFERENCED;
		spin_unlock(&inode->i_lock);
		return LRU_ROTATE;
	}

	/*
	 * On highmem systems, mapping_shrinkable() permits dropping
	 * page cache in order to free up struct inodes: lowmem might
	 * be under pressure before the cache inside the highmem zone.
	 */
	if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) {
		inode_pin_lru_isolating(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(lru_lock);
		if (remove_inode_buffers(inode)) {
			unsigned long reap;
			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
			if (current_is_kswapd())
				__count_vm_events(KSWAPD_INODESTEAL, reap);
			else
				__count_vm_events(PGINODESTEAL, reap);
			mm_account_reclaimed_pages(reap);
		}
		inode_unpin_lru_isolating(inode);
		spin_lock(lru_lock);
		return LRU_RETRY;
	}

	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	list_lru_isolate_move(lru, &inode->i_lru, freeable);
	spin_unlock(&inode->i_lock);

	this_cpu_dec(nr_unused);
	return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(freeable);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
				     inode_lru_isolate, &freeable);
	dispose_list(&freeable);
	return freed;
}

static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_locked);
/*
 * Called with the inode lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data, bool is_inode_hash_locked)
{
	struct inode *inode = NULL;

	if (is_inode_hash_locked)
		lockdep_assert_held(&inode_hash_lock);
	else
		lockdep_assert_not_held(&inode_hash_lock);

	rcu_read_lock();
repeat:
	hlist_for_each_entry_rcu(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode, is_inode_hash_locked);
			goto repeat;
		}
		if (unlikely(inode->i_state & I_CREATING)) {
			spin_unlock(&inode->i_lock);
			rcu_read_unlock();
			return ERR_PTR(-ESTALE);
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		rcu_read_unlock();
		return inode;
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino,
				bool is_inode_hash_locked)
{
	struct inode *inode = NULL;

	if (is_inode_hash_locked)
		lockdep_assert_held(&inode_hash_lock);
	else
		lockdep_assert_not_held(&inode_hash_lock);

	rcu_read_lock();
repeat:
	hlist_for_each_entry_rcu(inode, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode, is_inode_hash_locked);
			goto repeat;
		}
		if (unlikely(inode->i_state & I_CREATING)) {
			spin_unlock(&inode->i_lock);
			rcu_read_unlock();
			return ERR_PTR(-ESTALE);
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		rcu_read_unlock();
		return inode;
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	res++;
	/* get_next_ino should not provide a 0 inode number */
	if (unlikely(!res))
		res++;
	*p = res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);
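
/*
 * Illustrative sketch (addition, not from the original source): pseudo
 * filesystems with no on-disk inode numbers pair get_next_ino() with
 * new_inode() when constructing a fresh inode:
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		inode->i_ino = get_next_ino();
 *
 * The batching above keeps the allocation mostly CPU-local, touching
 * the shared counter only once per LAST_INO_BATCH numbers.
 */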

/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock.
 * The inode won't be chained into the superblock's s_inodes list.
 * This means:
 * - the fs can't be unmounted
 * - quotas, fsnotify and writeback can't work on it
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	return alloc_inode(sb);
}

/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			// mutex_destroy(&inode->i_mutex);
			init_rwsem(&inode->i_rwsem);
			lockdep_set_class(&inode->i_rwsem,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode:	new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW & ~I_CREATING;
	/*
	 * Pairs with the barrier in prepare_to_wait_event() to make sure
	 * ___wait_var_event() either sees the bit cleared or
	 * waitqueue_active() check in wake_up_var() sees the waiter.
	 */
	smp_mb();
	inode_wake_up_bit(inode, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

void discard_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	/*
	 * Pairs with the barrier in prepare_to_wait_event() to make sure
	 * ___wait_var_event() either sees the bit cleared or
	 * waitqueue_active() check in wake_up_var() sees the waiter.
	 */
	smp_mb();
	inode_wake_up_bit(inode, __I_NEW);
	spin_unlock(&inode->i_lock);
	iput(inode);
}
EXPORT_SYMBOL(discard_new_inode);
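
/*
 * Illustrative sketch (addition, not from the original source): a
 * create path pairs the helpers above. Once a new inode is hashed with
 * I_NEW set, success ends in unlock_new_inode() (or d_instantiate_new(),
 * which clears I_NEW too) and failure in discard_new_inode(), so
 * waiters are woken either way ("foofs" names are hypothetical):
 *
 *	inode = foofs_new_inode(dir, mode);	// hashed, I_NEW set
 *	err = foofs_add_entry(dentry, inode);	// hypothetical
 *	if (err) {
 *		drop_nlink(inode);
 *		discard_new_inode(inode);	// wakes waiters, iput()s
 *		return err;
 *	}
 *	d_instantiate_new(dentry, inode);
 *	return 0;
 */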

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument. Passed objects must not be directories.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1)
		WARN_ON_ONCE(S_ISDIR(inode1->i_mode));
	if (inode2)
		WARN_ON_ONCE(S_ISDIR(inode2->i_mode));
	if (inode1 > inode2)
		swap(inode1, inode2);
	if (inode1)
		inode_lock(inode1);
	if (inode2 && inode2 != inode1)
		inode_lock_nested(inode2, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1) {
		WARN_ON_ONCE(S_ISDIR(inode1->i_mode));
		inode_unlock(inode1);
	}
	if (inode2 && inode2 != inode1) {
		WARN_ON_ONCE(S_ISDIR(inode2->i_mode));
		inode_unlock(inode2);
	}
}
EXPORT_SYMBOL(unlock_two_nondirectories);
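
/*
 * Illustrative sketch (addition, not from the original source): the
 * address-order locking above makes the pair safe against ABBA
 * deadlocks, so callers operating on two regular files can simply
 * bracket their work:
 *
 *	lock_two_nondirectories(source, target);
 *	// ... update both inodes ...
 *	unlock_two_nondirectories(source, target);
 *
 * Either pointer may be NULL, and passing the same inode twice locks
 * it only once.
 */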

/**
 * inode_insert5 - obtain an inode from a mounted file system
 * @inode:	pre-allocated inode to use for insert to cache
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * variant of iget5_locked() that doesn't allocate an inode.
 *
 * If the inode is not present in the cache, insert the pre-allocated inode and
 * return it locked, hashed, and with the I_NEW flag set. The file system gets
 * to fill it in before unlocking it via unlock_new_inode().
 *
 * Note that both @test and @set are called with the inode_hash_lock held, so
 * they can't sleep.
 */
struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
			    int (*test)(struct inode *, void *),
			    int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
	struct inode *old;

again:
	spin_lock(&inode_hash_lock);
	old = find_inode(inode->i_sb, head, test, data, true);
	if (unlikely(old)) {
		/*
		 * Uhhuh, somebody else created the same inode under us.
		 * Use the old inode instead of the preallocated one.
		 */
		spin_unlock(&inode_hash_lock);
		if (IS_ERR(old))
			return NULL;
		wait_on_inode(old);
		if (unlikely(inode_unhashed(old))) {
			iput(old);
			goto again;
		}
		return old;
	}

	if (set && unlikely(set(inode, data))) {
		inode = NULL;
		goto unlock;
	}

	/*
	 * Return the locked inode with I_NEW set, the
	 * caller is responsible for filling in the contents
	 */
	spin_lock(&inode->i_lock);
	inode->i_state |= I_NEW;
	hlist_add_head_rcu(&inode->i_hash, head);
	spin_unlock(&inode->i_lock);

	/*
	 * Add inode to the sb list if it's not already. It has I_NEW at this
	 * point, so it should be safe to test i_sb_list locklessly.
	 */
	if (list_empty(&inode->i_sb_list))
		inode_sb_list_add(inode);
unlock:
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(inode_insert5);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not present in the cache, allocate and insert a new inode
 * and return it locked, hashed, and with the I_NEW flag set. The file system
 * gets to fill it in before unlocking it via unlock_new_inode().
 *
 * Note that both @test and @set are called with the inode_hash_lock held, so
 * they can't sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5(sb, hashval, test, data);

	if (!inode) {
		struct inode *new = alloc_inode(sb);

		if (new) {
			inode = inode_insert5(new, hashval, test, set, data);
			if (unlikely(inode != new))
				destroy_inode(new);
		}
	}
	return inode;
}
EXPORT_SYMBOL(iget5_locked);

/**
 * iget5_locked_rcu - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * This is equivalent to iget5_locked, except the @test callback must
 * tolerate the inode not being stable, including being mid-teardown.
 */
struct inode *iget5_locked_rcu(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode, *new;

again:
	inode = find_inode(sb, head, test, data, false);
	if (inode) {
		if (IS_ERR(inode))
			return NULL;
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
		return inode;
	}

	new = alloc_inode(sb);
	if (new) {
		inode = inode_insert5(new, hashval, test, set, data);
		if (unlikely(inode != new))
			destroy_inode(new);
	}
	return inode;
}
EXPORT_SYMBOL_GPL(iget5_locked_rcu);
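
/*
 * Illustrative sketch (addition, not from the original source): the
 * usual shape of the @test/@set callbacks for iget5_locked(), keyed
 * here on a 64-bit identifier that doesn't fit in i_ino ("foofs" names
 * are hypothetical). Neither callback may sleep:
 *
 *	static int foofs_test(struct inode *inode, void *data)
 *	{
 *		return FOOFS_I(inode)->object_id == *(u64 *)data;
 *	}
 *
 *	static int foofs_set(struct inode *inode, void *data)
 *	{
 *		FOOFS_I(inode)->object_id = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, hash_64(id, 32), foofs_test, foofs_set, &id);
 */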

/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;
again:
	inode = find_inode_fast(sb, head, ino, false);
	if (inode) {
		if (IS_ERR(inode))
			return NULL;
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino, true);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head_rcu(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		if (IS_ERR(old))
			return NULL;
		inode = old;
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);
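
/*
 * Illustrative sketch (addition, not from the original source): the
 * canonical caller-side pattern for iget_locked(). Only the thread
 * that sees I_NEW fills the inode in; everyone else receives it
 * already initialised ("foofs_read_inode" is hypothetical):
 *
 *	inode = iget_locked(sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (!(inode->i_state & I_NEW))
 *		return inode;			// cache hit, ready to use
 *
 *	err = foofs_read_inode(inode);		// read from backing store
 *	if (err) {
 *		iget_failed(inode);		// unhashes and drops it
 *		return ERR_PTR(err);
 *	}
 *	unlock_new_inode(inode);
 *	return inode;
 */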

/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	hlist_for_each_entry_rcu(inode, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb)
			return 0;
	}
	return 1;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	rcu_read_lock();
	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(iunique);

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode. You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data, true);
	spin_unlock(&inode_hash_lock);

	return IS_ERR(inode) ? NULL : inode;
}
EXPORT_SYMBOL(ilookup5_nowait);
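
/*
 * Illustrative sketch (addition, not from the original source): igrab()
 * is how code takes a reference on an inode it did not obtain from a
 * lookup, since it refuses inodes that are already being freed:
 *
 *	struct inode *inode = igrab(candidate);
 *
 *	if (!inode)
 *		return -ESTALE;	// raced with eviction, back off
 *	// ... safe to use inode here ...
 *	iput(inode);
 */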

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count. This helper waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode;
again:
	inode = ilookup5_nowait(sb, hashval, test, data);
	if (inode) {
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
	}
	return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;
again:
	inode = find_inode_fast(sb, head, ino, false);

	if (inode) {
		if (IS_ERR(inode))
			return NULL;
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
	}
	return inode;
}
EXPORT_SYMBOL(ilookup);

/**
 * find_inode_nowait - find an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @match:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @match
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, where the helper function @match will return 0 if the inode
 * does not match, 1 if the inode does match, and -1 if the search
 * should be stopped. The @match function must be responsible for
 * taking the i_lock spin_lock and checking i_state for an inode being
 * freed or being initialized, and incrementing the reference count
 * before returning 1. It also must not sleep, since it is called with
 * the inode_hash_lock spinlock held.
 *
 * This is an even more generalized version of ilookup5() when the
 * function must never block --- find_inode() can block in
 * __wait_on_freeing_inode() --- or when the caller cannot increment
 * the reference count because the resulting iput() might cause an
 * inode eviction. The tradeoff is that the @match function must be
 * very carefully implemented.
 */
struct inode *find_inode_nowait(struct super_block *sb,
				unsigned long hashval,
				int (*match)(struct inode *, unsigned long,
					     void *),
				void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode, *ret_inode = NULL;
	int mval;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		mval = match(inode, hashval, data);
		if (mval == 0)
			continue;
		if (mval == 1)
			ret_inode = inode;
		goto out;
	}
out:
	spin_unlock(&inode_hash_lock);
	return ret_inode;
}
EXPORT_SYMBOL(find_inode_nowait);

/**
 * find_inode_rcu - find an inode in the inode cache
 * @sb:		Super block of file system to search
 * @hashval:	Key to hash
 * @test:	Function to test match on an inode
 * @data:	Data for test function
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * where the helper function @test will return 0 if the inode does not match
 * and 1 if it does. The @test function must be responsible for taking the
 * i_lock spin_lock and checking i_state for an inode being freed or being
 * initialized.
 *
 * If successful, this will return the inode for which the @test function
 * returned 1 and NULL otherwise.
 *
 * The @test function is not permitted to take a ref on any inode presented.
 * It is also not permitted to sleep.
 *
 * The caller must hold the RCU read lock.
 */
struct inode *find_inode_rcu(struct super_block *sb, unsigned long hashval,
			     int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "suspicious find_inode_rcu() usage");

	hlist_for_each_entry_rcu(inode, head, i_hash) {
		if (inode->i_sb == sb &&
		    !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) &&
		    test(inode, data))
			return inode;
	}
	return NULL;
}
EXPORT_SYMBOL(find_inode_rcu);
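
/*
 * Illustrative sketch (addition, not from the original source): callers
 * of find_inode_rcu() bracket it with the RCU read lock and must not
 * assume the returned inode stays live once the critical section ends;
 * take a proper reference before using it outside:
 *
 *	rcu_read_lock();
 *	inode = find_inode_rcu(sb, hashval, test, data);
 *	if (inode)
 *		inode = igrab(inode);	// may fail if being freed
 *	rcu_read_unlock();
 */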

/**
 * find_inode_by_ino_rcu - Find an inode in the inode cache
 * @sb:		Super block of file system to search
 * @ino:	The inode number to match
 *
 * Search for an inode with inode number @ino on superblock @sb in the
 * inode cache. Inodes with I_FREEING or I_WILL_FREE set are skipped.
 *
 * If successful, this will return the matching inode and NULL otherwise.
 * No reference is taken on the returned inode; if the caller needs one
 * it must acquire it (e.g. via igrab()) before leaving the RCU read-side
 * critical section.
 *
 * The caller must hold the RCU read lock.
 */
struct inode *find_inode_by_ino_rcu(struct super_block *sb,
				    unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "suspicious find_inode_by_ino_rcu() usage");

	hlist_for_each_entry_rcu(inode, head, i_hash) {
		if (inode->i_ino == ino &&
		    inode->i_sb == sb &&
		    !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)))
			return inode;
	}
	return NULL;
}
EXPORT_SYMBOL(find_inode_by_ino_rcu);

int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct inode *old = NULL;
		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW | I_CREATING;
			hlist_add_head_rcu(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		if (unlikely(old->i_state & I_CREATING)) {
			spin_unlock(&old->i_lock);
			spin_unlock(&inode_hash_lock);
			return -EBUSY;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *old;

	inode->i_state |= I_CREATING;
	old = inode_insert5(inode, hashval, test, NULL, data);

	if (old != inode) {
		iput(old);
		return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(insert_inode_locked4);


int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);
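
/*
 * Illustrative sketch (addition, not from the original source): a
 * filesystem that never wants unreferenced inodes cached (for example
 * because they are cheap to rebuild) points ->drop_inode at the helper
 * above, making iput_final() below evict immediately:
 *
 *	static const struct super_operations foofs_sops = {
 *		// ... "foofs" is hypothetical ...
 *		.drop_inode	= generic_delete_inode,
 *	};
 *
 * Leaving ->drop_inode NULL selects generic_drop_inode(), i.e. the
 * default keep-in-cache behaviour for hashed, still-linked inodes.
 */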
1906 */
1907 static void iput_final(struct inode *inode)
1908 {
1909 struct super_block *sb = inode->i_sb;
1910 const struct super_operations *op = inode->i_sb->s_op;
1911 unsigned long state;
1912 int drop;
1913
1914 WARN_ON(inode->i_state & I_NEW);
1915
1916 if (op->drop_inode)
1917 drop = op->drop_inode(inode);
1918 else
1919 drop = generic_drop_inode(inode);
1920
1921 if (!drop &&
1922 !(inode->i_state & I_DONTCACHE) &&
1923 (sb->s_flags & SB_ACTIVE)) {
1924 __inode_add_lru(inode, true);
1925 spin_unlock(&inode->i_lock);
1926 return;
1927 }
1928
1929 state = inode->i_state;
1930 if (!drop) {
1931 WRITE_ONCE(inode->i_state, state | I_WILL_FREE);
1932 spin_unlock(&inode->i_lock);
1933
1934 write_inode_now(inode, 1);
1935
1936 spin_lock(&inode->i_lock);
1937 state = inode->i_state;
1938 WARN_ON(state & I_NEW);
1939 state &= ~I_WILL_FREE;
1940 }
1941
1942 WRITE_ONCE(inode->i_state, state | I_FREEING);
1943 if (!list_empty(&inode->i_lru))
1944 inode_lru_list_del(inode);
1945 spin_unlock(&inode->i_lock);
1946
1947 evict(inode);
1948 }
1949
1950 /**
1951 * iput - put an inode
1952 * @inode: inode to put
1953 *
1954 * Puts an inode, dropping its usage count. If the inode use count hits
1955 * zero, the inode is then freed and may also be destroyed.
1956 *
1957 * Consequently, iput() can sleep.
1958 */
1959 void iput(struct inode *inode)
1960 {
1961 if (!inode)
1962 return;
1963 BUG_ON(inode->i_state & I_CLEAR);
1964 retry:
1965 if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
1966 if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
1967 atomic_inc(&inode->i_count);
1968 spin_unlock(&inode->i_lock);
1969 trace_writeback_lazytime_iput(inode);
1970 mark_inode_dirty_sync(inode);
1971 goto retry;
1972 }
1973 iput_final(inode);
1974 }
1975 }
1976 EXPORT_SYMBOL(iput);
1977
1978 #ifdef CONFIG_BLOCK
1979 /**
1980 * bmap - find a block number in a file
1981 * @inode: inode owning the block number being requested
1982 * @block: pointer containing the block to find
1983 *
1984 * Replaces the value in ``*block`` with the corresponding block number
1985 * on the device holding the file.
1986 * That is, asked for block 4 of inode 1, the function replaces the 4 in
1987 * ``*block`` with the number of the disk block, relative to the start of
1988 * the disk, that holds that block of the file.
1989 *
1990 * Returns -EINVAL if the filesystem does not support bmap, 0 otherwise.
1991 * If the mapping falls into a hole, returns 0 and ``*block`` is also set to 0.
1992 */
1993 int bmap(struct inode *inode, sector_t *block)
1994 {
1995 if (!inode->i_mapping->a_ops->bmap)
1996 return -EINVAL;
1997
1998 *block = inode->i_mapping->a_ops->bmap(inode->i_mapping, *block);
1999 return 0;
2000 }
2001 EXPORT_SYMBOL(bmap);
2002 #endif
2003
2004 /*
2005 * With relative atime, only update atime if the previous atime is
2006 * earlier than or equal to either the ctime or mtime,
2007 * or if at least a day has passed since the last atime update.
2008 */
2009 static bool relatime_need_update(struct vfsmount *mnt, struct inode *inode,
2010 struct timespec64 now)
2011 {
2012 struct timespec64 atime, mtime, ctime;
2013
2014 if (!(mnt->mnt_flags & MNT_RELATIME))
2015 return true;
2016 /*
2017 * Is mtime younger than or equal to atime? If yes, update atime:
2018 */
2019 atime = inode_get_atime(inode);
2020 mtime = inode_get_mtime(inode);
2021 if (timespec64_compare(&mtime, &atime) >= 0)
2022 return true;
2023 /*
2024 * Is ctime younger than or equal to atime?
If yes, update atime:
2025 */
2026 ctime = inode_get_ctime(inode);
2027 if (timespec64_compare(&ctime, &atime) >= 0)
2028 return true;
2029
2030 /*
2031 * Is the previous atime value older than a day? If yes,
2032 * update atime:
2033 */
2034 if ((long)(now.tv_sec - atime.tv_sec) >= 24*60*60)
2035 return true;
2036 /*
2037 * Good, we can skip the atime update:
2038 */
2039 return false;
2040 }
2041
2042 /**
2043 * inode_update_timestamps - update the timestamps on the inode
2044 * @inode: inode to be updated
2045 * @flags: S_* flags that need to be updated
2046 *
2047 * The update_time function is called when an inode's timestamps need to be
2048 * updated for a read or write operation. This function handles updating the
2049 * actual timestamps. It's up to the caller to ensure that the inode is marked
2050 * dirty appropriately.
2051 *
2052 * In the case where any of S_MTIME, S_CTIME, or S_VERSION need to be updated,
2053 * attempt to update all three of them. S_ATIME updates can be handled
2054 * independently of the rest.
2055 *
2056 * Returns a set of S_* flags indicating which values changed.
2057 */
2058 int inode_update_timestamps(struct inode *inode, int flags)
2059 {
2060 int updated = 0;
2061 struct timespec64 now;
2062
2063 if (flags & (S_MTIME|S_CTIME|S_VERSION)) {
2064 struct timespec64 ctime = inode_get_ctime(inode);
2065 struct timespec64 mtime = inode_get_mtime(inode);
2066
2067 now = inode_set_ctime_current(inode);
2068 if (!timespec64_equal(&now, &ctime))
2069 updated |= S_CTIME;
2070 if (!timespec64_equal(&now, &mtime)) {
2071 inode_set_mtime_to_ts(inode, now);
2072 updated |= S_MTIME;
2073 }
2074 if (IS_I_VERSION(inode) && inode_maybe_inc_iversion(inode, updated))
2075 updated |= S_VERSION;
2076 } else {
2077 now = current_time(inode);
2078 }
2079
2080 if (flags & S_ATIME) {
2081 struct timespec64 atime = inode_get_atime(inode);
2082
2083 if (!timespec64_equal(&now, &atime)) {
2084 inode_set_atime_to_ts(inode, now);
2085 updated |= S_ATIME;
2086 }
2087 }
2088 return updated;
2089 }
2090 EXPORT_SYMBOL(inode_update_timestamps);
2091
2092 /**
2093 * generic_update_time - update the timestamps on the inode
2094 * @inode: inode to be updated
2095 * @flags: S_* flags that need to be updated
2096 *
2097 * The update_time function is called when an inode's timestamps need to be
2098 * updated for a read or write operation. In the case where any of S_MTIME,
2099 * S_CTIME, or S_VERSION need to be updated, we attempt to update all three of
2100 * them. S_ATIME updates can be handled independently of the rest.
2101 *
2102 * Returns a S_* mask indicating which fields were updated.
2103 */
2104 int generic_update_time(struct inode *inode, int flags)
2105 {
2106 int updated = inode_update_timestamps(inode, flags);
2107 int dirty_flags = 0;
2108
2109 if (updated & (S_ATIME|S_MTIME|S_CTIME))
2110 dirty_flags = inode->i_sb->s_flags & SB_LAZYTIME ? I_DIRTY_TIME : I_DIRTY_SYNC;
2111 if (updated & S_VERSION)
2112 dirty_flags |= I_DIRTY_SYNC;
2113 __mark_inode_dirty(inode, dirty_flags);
2114 return updated;
2115 }
2116 EXPORT_SYMBOL(generic_update_time);
2117
2118 /*
2119 * This does the actual work of updating an inode's time or version. The
2120 * caller must have called mnt_want_write() before calling this.
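 *
 * Illustrative call sequence (a sketch; see touch_atime() below for a
 * real caller):
 *
 *	if (!mnt_get_write_access(mnt)) {
 *		inode_update_time(inode, S_MTIME | S_CTIME);
 *		mnt_put_write_access(mnt);
 *	}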
2121 */
2122 int inode_update_time(struct inode *inode, int flags)
2123 {
2124 if (inode->i_op->update_time)
2125 return inode->i_op->update_time(inode, flags);
2126 generic_update_time(inode, flags);
2127 return 0;
2128 }
2129 EXPORT_SYMBOL(inode_update_time);
2130
2131 /**
2132 * atime_needs_update - check whether the access time needs updating
2133 * @path: the &struct path of the file being accessed
2134 * @inode: inode to check
2135 *
2136 * Determine whether the access time on an inode needs to be updated.
2137 * This function automatically handles read-only filesystems and media,
2138 * as well as the "noatime" mount flag and inode-specific "noatime" markers.
2139 */
2140 bool atime_needs_update(const struct path *path, struct inode *inode)
2141 {
2142 struct vfsmount *mnt = path->mnt;
2143 struct timespec64 now, atime;
2144
2145 if (inode->i_flags & S_NOATIME)
2146 return false;
2147
2148 /* Atime updates will likely cause i_uid and i_gid to be written
2149 * back improperly if their true value is unknown to the VFS.
2150 */
2151 if (HAS_UNMAPPED_ID(mnt_idmap(mnt), inode))
2152 return false;
2153
2154 if (IS_NOATIME(inode))
2155 return false;
2156 if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
2157 return false;
2158
2159 if (mnt->mnt_flags & MNT_NOATIME)
2160 return false;
2161 if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
2162 return false;
2163
2164 now = current_time(inode);
2165
2166 if (!relatime_need_update(mnt, inode, now))
2167 return false;
2168
2169 atime = inode_get_atime(inode);
2170 if (timespec64_equal(&atime, &now))
2171 return false;
2172
2173 return true;
2174 }
2175
2176 void touch_atime(const struct path *path)
2177 {
2178 struct vfsmount *mnt = path->mnt;
2179 struct inode *inode = d_inode(path->dentry);
2180
2181 if (!atime_needs_update(path, inode))
2182 return;
2183
2184 if (!sb_start_write_trylock(inode->i_sb))
2185 return;
2186
2187 if (mnt_get_write_access(mnt) != 0)
2188 goto skip_update;
2189 /*
2190 * File systems can error out when updating inodes if they need to
2191 * allocate new space to modify an inode (as is the case for
2192 * Btrfs), but since we touch atime while walking down the path we
2193 * really don't care if we failed to update the atime of the file,
2194 * so just ignore the return value.
2195 * We may also fail on filesystems that have the ability to make parts
2196 * of the fs read only, e.g. subvolumes in Btrfs.
2197 */
2198 inode_update_time(inode, S_ATIME);
2199 mnt_put_write_access(mnt);
2200 skip_update:
2201 sb_end_write(inode->i_sb);
2202 }
2203 EXPORT_SYMBOL(touch_atime);
2204
2205 /*
2206 * Return the mask of changes for notify_change() that need to be done as a
2207 * response to a write or truncate. Return 0 if nothing has to be changed.
2208 * Negative value on error (the change should be denied).
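 *
 * Illustrative use of the returned mask (a sketch; see
 * file_remove_privs_flags() below for a real caller):
 *
 *	kill = dentry_needs_remove_privs(idmap, dentry);
 *	if (kill < 0)
 *		return kill;
 *	if (kill)
 *		error = __remove_privs(idmap, dentry, kill);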
2209 */
2210 int dentry_needs_remove_privs(struct mnt_idmap *idmap,
2211 struct dentry *dentry)
2212 {
2213 struct inode *inode = d_inode(dentry);
2214 int mask = 0;
2215 int ret;
2216
2217 if (IS_NOSEC(inode))
2218 return 0;
2219
2220 mask = setattr_should_drop_suidgid(idmap, inode);
2221 ret = security_inode_need_killpriv(dentry);
2222 if (ret < 0)
2223 return ret;
2224 if (ret)
2225 mask |= ATTR_KILL_PRIV;
2226 return mask;
2227 }
2228
2229 static int __remove_privs(struct mnt_idmap *idmap,
2230 struct dentry *dentry, int kill)
2231 {
2232 struct iattr newattrs;
2233
2234 newattrs.ia_valid = ATTR_FORCE | kill;
2235 /*
2236 * Note we call this on write, so notify_change will not
2237 * encounter any conflicting delegations:
2238 */
2239 return notify_change(idmap, dentry, &newattrs, NULL);
2240 }
2241
2242 int file_remove_privs_flags(struct file *file, unsigned int flags)
2243 {
2244 struct dentry *dentry = file_dentry(file);
2245 struct inode *inode = file_inode(file);
2246 int error = 0;
2247 int kill;
2248
2249 if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
2250 return 0;
2251
2252 kill = dentry_needs_remove_privs(file_mnt_idmap(file), dentry);
2253 if (kill < 0)
2254 return kill;
2255
2256 if (kill) {
2257 if (flags & IOCB_NOWAIT)
2258 return -EAGAIN;
2259
2260 error = __remove_privs(file_mnt_idmap(file), dentry, kill);
2261 }
2262
2263 if (!error)
2264 inode_has_no_xattr(inode);
2265 return error;
2266 }
2267 EXPORT_SYMBOL_GPL(file_remove_privs_flags);
2268
2269 /**
2270 * file_remove_privs - remove special file privileges (suid, capabilities)
2271 * @file: file to remove privileges from
2272 *
2273 * When a file is modified by a write or truncation, ensure that special
2274 * file privileges are removed.
2275 *
2276 * Return: 0 on success, negative errno on failure.
2277 */
2278 int file_remove_privs(struct file *file)
2279 {
2280 return file_remove_privs_flags(file, 0);
2281 }
2282 EXPORT_SYMBOL(file_remove_privs);
2283
2284 /**
2285 * current_time - Return FS time (possibly fine-grained)
2286 * @inode: inode.
2287 *
2288 * Return the current time truncated to the time granularity supported by
2289 * the fs, as suitable for a ctime/mtime change. If the ctime is flagged
2290 * as having been QUERIED, get a fine-grained timestamp, but don't update
2291 * the floor.
2292 *
2293 * For a multigrain inode, this is effectively an estimate of the timestamp
2294 * that a file would receive. An actual update must go through
2295 * inode_set_ctime_current().
2296 */
2297 struct timespec64 current_time(struct inode *inode)
2298 {
2299 struct timespec64 now;
2300 u32 cns;
2301
2302 ktime_get_coarse_real_ts64_mg(&now);
2303
2304 if (!is_mgtime(inode))
2305 goto out;
2306
2307 /* If nothing has queried it, then coarse time is fine */
2308 cns = smp_load_acquire(&inode->i_ctime_nsec);
2309 if (cns & I_CTIME_QUERIED) {
2310 /*
2311 * If there is no apparent change, then get a fine-grained
2312 * timestamp.
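 * The fine-grained clock never runs behind the coarse one, so
 * this normally yields a later, distinct value, keeping the
 * change observable.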
2313 */
2314 if (now.tv_nsec == (cns & ~I_CTIME_QUERIED))
2315 ktime_get_real_ts64(&now);
2316 }
2317 out:
2318 return timestamp_truncate(now, inode);
2319 }
2320 EXPORT_SYMBOL(current_time);
2321
2322 static int inode_needs_update_time(struct inode *inode)
2323 {
2324 struct timespec64 now, ts;
2325 int sync_it = 0;
2326
2327 /* First try to exhaust all avenues to not sync */
2328 if (IS_NOCMTIME(inode))
2329 return 0;
2330
2331 now = current_time(inode);
2332
2333 ts = inode_get_mtime(inode);
2334 if (!timespec64_equal(&ts, &now))
2335 sync_it |= S_MTIME;
2336
2337 ts = inode_get_ctime(inode);
2338 if (!timespec64_equal(&ts, &now))
2339 sync_it |= S_CTIME;
2340
2341 if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
2342 sync_it |= S_VERSION;
2343
2344 return sync_it;
2345 }
2346
2347 static int __file_update_time(struct file *file, int sync_mode)
2348 {
2349 int ret = 0;
2350 struct inode *inode = file_inode(file);
2351
2352 /* try to update time settings */
2353 if (!mnt_get_write_access_file(file)) {
2354 ret = inode_update_time(inode, sync_mode);
2355 mnt_put_write_access_file(file);
2356 }
2357
2358 return ret;
2359 }
2360
2361 /**
2362 * file_update_time - update mtime and ctime
2363 * @file: file accessed
2364 *
2365 * Update the mtime and ctime members of an inode and mark the inode for
2366 * writeback. Note that this function is meant exclusively for usage in
2367 * the file write path of filesystems, and filesystems may choose to
2368 * explicitly ignore updates via this function with the S_NOCMTIME inode
2369 * flag, e.g. for network filesystems where these timestamps are handled
2370 * by the server. This can return an error for filesystems that need to
2371 * allocate space in order to update an inode.
2372 *
2373 * Return: 0 on success, negative errno on failure.
2374 */
2375 int file_update_time(struct file *file)
2376 {
2377 int ret;
2378 struct inode *inode = file_inode(file);
2379
2380 ret = inode_needs_update_time(inode);
2381 if (ret <= 0)
2382 return ret;
2383
2384 return __file_update_time(file, ret);
2385 }
2386 EXPORT_SYMBOL(file_update_time);
2387
2388 /**
2389 * file_modified_flags - handle mandated vfs changes when modifying a file
2390 * @file: file that was modified
2391 * @flags: kiocb flags
2392 *
2393 * When a file has been modified, ensure that special
2394 * file privileges are removed and timestamps are updated.
2395 *
2396 * If IOCB_NOWAIT is set, special file privileges will not be removed and
2397 * timestamps will not be updated; -EAGAIN is returned instead.
2398 *
2399 * Context: Caller must hold the file's inode lock.
2400 *
2401 * Return: 0 on success, negative errno on failure.
2402 */
2403 static int file_modified_flags(struct file *file, int flags)
2404 {
2405 int ret;
2406 struct inode *inode = file_inode(file);
2407
2408 /*
2409 * Clear the security bits if the process is not being run by root.
2410 * This keeps people from modifying setuid and setgid binaries.
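 * Capabilities and other security attributes are dropped via
 * ATTR_KILL_PRIV when security_inode_need_killpriv() requests it
 * (see dentry_needs_remove_privs() above).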
2411 */
2412 ret = file_remove_privs_flags(file, flags);
2413 if (ret)
2414 return ret;
2415
2416 if (unlikely(file->f_mode & FMODE_NOCMTIME))
2417 return 0;
2418
2419 ret = inode_needs_update_time(inode);
2420 if (ret <= 0)
2421 return ret;
2422 if (flags & IOCB_NOWAIT)
2423 return -EAGAIN;
2424
2425 return __file_update_time(file, ret);
2426 }
2427
2428 /**
2429 * file_modified - handle mandated vfs changes when modifying a file
2430 * @file: file that was modified
2431 *
2432 * When a file has been modified, ensure that special
2433 * file privileges are removed and timestamps are updated.
2434 *
2435 * Context: Caller must hold the file's inode lock.
2436 *
2437 * Return: 0 on success, negative errno on failure.
2438 */
2439 int file_modified(struct file *file)
2440 {
2441 return file_modified_flags(file, 0);
2442 }
2443 EXPORT_SYMBOL(file_modified);
2444
2445 /**
2446 * kiocb_modified - handle mandated vfs changes when modifying a file
2447 * @iocb: iocb that was modified
2448 *
2449 * When a file has been modified, ensure that special
2450 * file privileges are removed and timestamps are updated.
2451 *
2452 * Context: Caller must hold the file's inode lock.
2453 *
2454 * Return: 0 on success, negative errno on failure.
2455 */
2456 int kiocb_modified(struct kiocb *iocb)
2457 {
2458 return file_modified_flags(iocb->ki_filp, iocb->ki_flags);
2459 }
2460 EXPORT_SYMBOL_GPL(kiocb_modified);
2461
2462 int inode_needs_sync(struct inode *inode)
2463 {
2464 if (IS_SYNC(inode))
2465 return 1;
2466 if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
2467 return 1;
2468 return 0;
2469 }
2470 EXPORT_SYMBOL(inode_needs_sync);
2471
2472 /*
2473 * If we try to find an inode in the inode hash while it is being
2474 * deleted, we have to wait until the filesystem completes its
2475 * deletion before reporting that it isn't found. This function waits
2476 * until the deletion _might_ have completed. Callers are responsible
2477 * for rechecking the inode state.
2478 *
2479 * It doesn't matter if I_NEW is not set initially; a call to
2480 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
2481 * will DTRT.
2482 */
2483 static void __wait_on_freeing_inode(struct inode *inode, bool is_inode_hash_locked)
2484 {
2485 struct wait_bit_queue_entry wqe;
2486 struct wait_queue_head *wq_head;
2487
2488 /*
2489 * Handle racing against evict(), see that routine for more details.
2490 */
2491 if (unlikely(inode_unhashed(inode))) {
2492 WARN_ON(is_inode_hash_locked);
2493 spin_unlock(&inode->i_lock);
2494 return;
2495 }
2496
2497 wq_head = inode_bit_waitqueue(&wqe, inode, __I_NEW);
2498 prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
2499 spin_unlock(&inode->i_lock);
2500 rcu_read_unlock();
2501 if (is_inode_hash_locked)
2502 spin_unlock(&inode_hash_lock);
2503 schedule();
2504 finish_wait(wq_head, &wqe.wq_entry);
2505 if (is_inode_hash_locked)
2506 spin_lock(&inode_hash_lock);
2507 rcu_read_lock();
2508 }
2509
2510 static __initdata unsigned long ihash_entries;
2511 static int __init set_ihash_entries(char *str)
2512 {
2513 if (!str)
2514 return 0;
2515 ihash_entries = simple_strtoul(str, &str, 0);
2516 return 1;
2517 }
2518 __setup("ihash_entries=", set_ihash_entries);
2519
2520 /*
2521 * Initialize the waitqueues and inode hash table.
2522 */
2523 void __init inode_init_early(void)
2524 {
2525 /* If hashes are distributed across NUMA nodes, defer
2526 * hash allocation until vmalloc space is available.
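 * (Whether the hashes are NUMA-distributed is controlled by the
 * "hashdist=" boot parameter; the table size itself can be overridden
 * with the "ihash_entries=" parameter handled above.)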
2527 */
2528 if (hashdist)
2529 return;
2530
2531 inode_hashtable =
2532 alloc_large_system_hash("Inode-cache",
2533 sizeof(struct hlist_head),
2534 ihash_entries,
2535 14,
2536 HASH_EARLY | HASH_ZERO,
2537 &i_hash_shift,
2538 &i_hash_mask,
2539 0,
2540 0);
2541 }
2542
2543 void __init inode_init(void)
2544 {
2545 /* inode slab cache */
2546 inode_cachep = kmem_cache_create("inode_cache",
2547 sizeof(struct inode),
2548 0,
2549 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
2550 SLAB_ACCOUNT),
2551 init_once);
2552
2553 /* Hash may have been set up in inode_init_early */
2554 if (!hashdist)
2555 return;
2556
2557 inode_hashtable =
2558 alloc_large_system_hash("Inode-cache",
2559 sizeof(struct hlist_head),
2560 ihash_entries,
2561 14,
2562 HASH_ZERO,
2563 &i_hash_shift,
2564 &i_hash_mask,
2565 0,
2566 0);
2567 }
2568
2569 void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
2570 {
2571 inode->i_mode = mode;
2572 if (S_ISCHR(mode)) {
2573 inode->i_fop = &def_chr_fops;
2574 inode->i_rdev = rdev;
2575 } else if (S_ISBLK(mode)) {
2576 if (IS_ENABLED(CONFIG_BLOCK))
2577 inode->i_fop = &def_blk_fops;
2578 inode->i_rdev = rdev;
2579 } else if (S_ISFIFO(mode))
2580 inode->i_fop = &pipefifo_fops;
2581 else if (S_ISSOCK(mode))
2582 ; /* leave it no_open_fops */
2583 else
2584 printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
2585 " inode %s:%lu\n", mode, inode->i_sb->s_id,
2586 inode->i_ino);
2587 }
2588 EXPORT_SYMBOL(init_special_inode);
2589
2590 /**
2591 * inode_init_owner - Init uid, gid and mode for a new inode according to POSIX standards
2592 * @idmap: idmap of the mount the inode was created from
2593 * @inode: New inode
2594 * @dir: Directory inode
2595 * @mode: mode of the new inode
2596 *
2597 * If the inode has been created through an idmapped mount, the idmap of
2598 * the vfsmount must be passed through @idmap. This function will then take
2599 * care to map the inode according to @idmap before checking permissions
2600 * and initializing i_uid and i_gid. On non-idmapped mounts or if permission
2601 * checking is to be performed on the raw inode, simply pass @nop_mnt_idmap.
2602 */
2603 void inode_init_owner(struct mnt_idmap *idmap, struct inode *inode,
2604 const struct inode *dir, umode_t mode)
2605 {
2606 inode_fsuid_set(inode, idmap);
2607 if (dir && dir->i_mode & S_ISGID) {
2608 inode->i_gid = dir->i_gid;
2609
2610 /* Directories are special, and always inherit S_ISGID */
2611 if (S_ISDIR(mode))
2612 mode |= S_ISGID;
2613 } else
2614 inode_fsgid_set(inode, idmap);
2615 inode->i_mode = mode;
2616 }
2617 EXPORT_SYMBOL(inode_init_owner);
2618
2619 /**
2620 * inode_owner_or_capable - check current task permissions to inode
2621 * @idmap: idmap of the mount the inode was found from
2622 * @inode: inode being checked
2623 *
2624 * Return true if current either has CAP_FOWNER in a namespace with the
2625 * inode owner uid mapped, or owns the file.
2626 *
2627 * If the inode has been found through an idmapped mount, the idmap of
2628 * the vfsmount must be passed through @idmap. This function will then take
2629 * care to map the inode according to @idmap before checking permissions.
2630 * On non-idmapped mounts or if permission checking is to be performed on the
2631 * raw inode, simply pass @nop_mnt_idmap.
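 *
 * Typical use in a permission check (a sketch, not a specific caller):
 *
 *	if (!inode_owner_or_capable(idmap, inode))
 *		return -EPERM;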
2632 */
2633 bool inode_owner_or_capable(struct mnt_idmap *idmap,
2634 const struct inode *inode)
2635 {
2636 vfsuid_t vfsuid;
2637 struct user_namespace *ns;
2638
2639 vfsuid = i_uid_into_vfsuid(idmap, inode);
2640 if (vfsuid_eq_kuid(vfsuid, current_fsuid()))
2641 return true;
2642
2643 ns = current_user_ns();
2644 if (vfsuid_has_mapping(ns, vfsuid) && ns_capable(ns, CAP_FOWNER))
2645 return true;
2646 return false;
2647 }
2648 EXPORT_SYMBOL(inode_owner_or_capable);
2649
2650 /*
2651 * Direct I/O helper functions
2652 */
2653 bool inode_dio_finished(const struct inode *inode)
2654 {
2655 return atomic_read(&inode->i_dio_count) == 0;
2656 }
2657 EXPORT_SYMBOL(inode_dio_finished);
2658
2659 /**
2660 * inode_dio_wait - wait for outstanding DIO requests to finish
2661 * @inode: inode to wait for
2662 *
2663 * Waits for all pending direct I/O requests to finish so that we can
2664 * proceed with a truncate or equivalent operation.
2665 *
2666 * Must be called under a lock that serializes taking new references
2667 * to i_dio_count, usually inode->i_mutex.
2668 */
2669 void inode_dio_wait(struct inode *inode)
2670 {
2671 wait_var_event(&inode->i_dio_count, inode_dio_finished(inode));
2672 }
2673 EXPORT_SYMBOL(inode_dio_wait);
2674
2675 void inode_dio_wait_interruptible(struct inode *inode)
2676 {
2677 wait_var_event_interruptible(&inode->i_dio_count,
2678 inode_dio_finished(inode));
2679 }
2680 EXPORT_SYMBOL(inode_dio_wait_interruptible);
2681
2682 /*
2683 * inode_set_flags - atomically set some inode flags
2684 *
2685 * Note: the caller should be holding i_mutex, or else be sure that
2686 * they have exclusive access to the inode structure (i.e., while the
2687 * inode is being instantiated). The reason for the cmpxchg() loop,
2688 * which wouldn't be necessary if all code paths that modify
2689 * i_flags actually followed this rule, is that there is at least one
2690 * code path which doesn't today, so we use cmpxchg() out of an
2691 * abundance of caution.
2692 *
2693 * In the long run, i_mutex is overkill, and we should probably look
2694 * at using the i_lock spinlock to protect i_flags, and then make sure
2695 * it is so documented in include/linux/fs.h and that all code follows
2696 * the locking convention!
2697 */
2698 void inode_set_flags(struct inode *inode, unsigned int flags,
2699 unsigned int mask)
2700 {
2701 WARN_ON_ONCE(flags & ~mask);
2702 set_mask_bits(&inode->i_flags, mask, flags);
2703 }
2704 EXPORT_SYMBOL(inode_set_flags);
2705
2706 void inode_nohighmem(struct inode *inode)
2707 {
2708 mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
2709 }
2710 EXPORT_SYMBOL(inode_nohighmem);
2711
2712 struct timespec64 inode_set_ctime_to_ts(struct inode *inode, struct timespec64 ts)
2713 {
2714 trace_inode_set_ctime_to_ts(inode, &ts);
2715 set_normalized_timespec64(&ts, ts.tv_sec, ts.tv_nsec);
2716 inode->i_ctime_sec = ts.tv_sec;
2717 inode->i_ctime_nsec = ts.tv_nsec;
2718 return ts;
2719 }
2720 EXPORT_SYMBOL(inode_set_ctime_to_ts);
2721
2722 /**
2723 * timestamp_truncate - Truncate timespec to a granularity
2724 * @t: Timespec
2725 * @inode: inode being updated
2726 *
2727 * Truncate a timespec to the granularity supported by the fs
2728 * containing the inode. Always rounds down. The granularity must
2729 * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
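 *
 * For example, on a filesystem with one-microsecond granularity
 * (sb->s_time_gran == 1000), a tv_nsec value of 1234567 is truncated
 * down to 1234000.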
2730 */
2731 struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
2732 {
2733 struct super_block *sb = inode->i_sb;
2734 unsigned int gran = sb->s_time_gran;
2735
2736 t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max);
2737 if (unlikely(t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min))
2738 t.tv_nsec = 0;
2739
2740 /* Avoid division in the common cases 1 ns and 1 s. */
2741 if (gran == 1)
2742 ; /* nothing */
2743 else if (gran == NSEC_PER_SEC)
2744 t.tv_nsec = 0;
2745 else if (gran > 1 && gran < NSEC_PER_SEC)
2746 t.tv_nsec -= t.tv_nsec % gran;
2747 else
2748 WARN(1, "invalid file time granularity: %u", gran);
2749 return t;
2750 }
2751 EXPORT_SYMBOL(timestamp_truncate);
2752
2753 /**
2754 * inode_set_ctime_current - set the ctime to current_time
2755 * @inode: inode
2756 *
2757 * Set the inode's ctime to the current time. Returns the value that was
2758 * assigned. If this is not a multigrain inode, then we set it to the
2759 * later of the coarse time and floor value.
2760 *
2761 * If it is multigrain, then we first see if the coarse-grained timestamp is
2762 * distinct from what is already there. If so, then use that. Otherwise, get a
2763 * fine-grained timestamp.
2764 *
2765 * After that, try to swap the new value into i_ctime_nsec. Accept the
2766 * resulting ctime, regardless of the outcome of the swap. If it has
2767 * already been replaced, then that timestamp is later than the earlier
2768 * unacceptable one, and is thus acceptable.
2769 */
2770 struct timespec64 inode_set_ctime_current(struct inode *inode)
2771 {
2772 struct timespec64 now;
2773 u32 cns, cur;
2774
2775 ktime_get_coarse_real_ts64_mg(&now);
2776 now = timestamp_truncate(now, inode);
2777
2778 /* Just return that if this is not a multigrain fs */
2779 if (!is_mgtime(inode)) {
2780 inode_set_ctime_to_ts(inode, now);
2781 goto out;
2782 }
2783
2784 /*
2785 * A fine-grained time is only needed if someone has queried
2786 * for timestamps, and the current coarse grained time isn't
2787 * later than what's already there.
2788 */
2789 cns = smp_load_acquire(&inode->i_ctime_nsec);
2790 if (cns & I_CTIME_QUERIED) {
2791 struct timespec64 ctime = { .tv_sec = inode->i_ctime_sec,
2792 .tv_nsec = cns & ~I_CTIME_QUERIED };
2793
2794 if (timespec64_compare(&now, &ctime) <= 0) {
2795 ktime_get_real_ts64_mg(&now);
2796 now = timestamp_truncate(now, inode);
2797 mgtime_counter_inc(mg_fine_stamps);
2798 }
2799 }
2800 mgtime_counter_inc(mg_ctime_updates);
2801
2802 /* No need to cmpxchg if it's exactly the same */
2803 if (cns == now.tv_nsec && inode->i_ctime_sec == now.tv_sec) {
2804 trace_ctime_xchg_skip(inode, &now);
2805 goto out;
2806 }
2807 cur = cns;
2808 retry:
2809 /* Try to swap the nsec value into place. */
2810 if (try_cmpxchg(&inode->i_ctime_nsec, &cur, now.tv_nsec)) {
2811 /* If swap occurred, then we're (mostly) done */
2812 inode->i_ctime_sec = now.tv_sec;
2813 trace_ctime_ns_xchg(inode, cns, now.tv_nsec, cur);
2814 mgtime_counter_inc(mg_ctime_swaps);
2815 } else {
2816 /*
2817 * Was the change due to someone marking the old ctime QUERIED?
2818 * If so then retry the swap. This can only happen once since
2819 * the only way to clear I_CTIME_QUERIED is to stamp the inode
2820 * with a new ctime.
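 * Any other value means another task stamped a new ctime in the
 * meantime; that timestamp is at least as late as ours, so it is
 * simply accepted below.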
2821 */
2822 if (!(cns & I_CTIME_QUERIED) && (cns | I_CTIME_QUERIED) == cur) {
2823 cns = cur;
2824 goto retry;
2825 }
2826 /* Otherwise, keep the existing ctime */
2827 now.tv_sec = inode->i_ctime_sec;
2828 now.tv_nsec = cur & ~I_CTIME_QUERIED;
2829 }
2830 out:
2831 return now;
2832 }
2833 EXPORT_SYMBOL(inode_set_ctime_current);
2834
2835 /**
2836 * inode_set_ctime_deleg - try to update the ctime on a delegated inode
2837 * @inode: inode to update
2838 * @update: timespec64 to set the ctime to
2839 *
2840 * Attempt to atomically update the ctime on behalf of a delegation holder.
2841 *
2842 * The NFS server can call back the holder of a delegation to get updated
2843 * inode attributes, including the mtime. When updating the mtime, update
2844 * the ctime to a value at least equal to that.
2845 *
2846 * This can race with concurrent updates to the inode, in which
2847 * case the update is skipped.
2848 *
2849 * Note that this works even when multigrain timestamps are not enabled,
2850 * so it is used in either case.
2851 */
2852 struct timespec64 inode_set_ctime_deleg(struct inode *inode, struct timespec64 update)
2853 {
2854 struct timespec64 now, cur_ts;
2855 u32 cur, old;
2856
2857 /* pairs with try_cmpxchg below */
2858 cur = smp_load_acquire(&inode->i_ctime_nsec);
2859 cur_ts.tv_nsec = cur & ~I_CTIME_QUERIED;
2860 cur_ts.tv_sec = inode->i_ctime_sec;
2861
2862 /* If the update is older than the existing value, skip it. */
2863 if (timespec64_compare(&update, &cur_ts) <= 0)
2864 return cur_ts;
2865
2866 ktime_get_coarse_real_ts64_mg(&now);
2867
2868 /* Clamp the update to "now" if it's in the future */
2869 if (timespec64_compare(&update, &now) > 0)
2870 update = now;
2871
2872 update = timestamp_truncate(update, inode);
2873
2874 /* No need to update if the values are already the same */
2875 if (timespec64_equal(&update, &cur_ts))
2876 return cur_ts;
2877
2878 /*
2879 * Try to swap the nsec value into place. If it fails, that means
2880 * it raced with an update due to a write or similar activity. That
2881 * stamp takes precedence, so just skip the update.
2882 */
2883 retry:
2884 old = cur;
2885 if (try_cmpxchg(&inode->i_ctime_nsec, &cur, update.tv_nsec)) {
2886 inode->i_ctime_sec = update.tv_sec;
2887 mgtime_counter_inc(mg_ctime_swaps);
2888 return update;
2889 }
2890
2891 /*
2892 * Was the change due to another task marking the old ctime QUERIED?
2893 *
2894 * If so, then retry the swap. This can only happen once since
2895 * the only way to clear I_CTIME_QUERIED is to stamp the inode
2896 * with a new ctime.
2897 */
2898 if (!(old & I_CTIME_QUERIED) && (cur == (old | I_CTIME_QUERIED)))
2899 goto retry;
2900
2901 /* Otherwise, it was a new timestamp. */
2902 cur_ts.tv_sec = inode->i_ctime_sec;
2903 cur_ts.tv_nsec = cur & ~I_CTIME_QUERIED;
2904 return cur_ts;
2905 }
2906 EXPORT_SYMBOL(inode_set_ctime_deleg);
2907
2908 /**
2909 * in_group_or_capable - check whether caller is CAP_FSETID privileged
2910 * @idmap: idmap of the mount @inode was found from
2911 * @inode: inode to check
2912 * @vfsgid: the new/current vfsgid of @inode
2913 *
2914 * Check whether @vfsgid is in the caller's group list or if the caller is
2915 * privileged with CAP_FSETID over @inode. This can be used to determine
2916 * whether the setgid bit can be kept or must be dropped.
2917 *
2918 * Return: true if the caller is sufficiently privileged, false if not.
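 *
 * For instance, mode_strip_sgid() below relies on this check to decide
 * whether a newly created file may keep the S_ISGID bit inherited from
 * its parent directory.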
2919 */
2920 bool in_group_or_capable(struct mnt_idmap *idmap,
2921 const struct inode *inode, vfsgid_t vfsgid)
2922 {
2923 if (vfsgid_in_group_p(vfsgid))
2924 return true;
2925 if (capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
2926 return true;
2927 return false;
2928 }
2929 EXPORT_SYMBOL(in_group_or_capable);
2930
2931 /**
2932 * mode_strip_sgid - handle the sgid bit for non-directories
2933 * @idmap: idmap of the mount the inode was created from
2934 * @dir: parent directory inode
2935 * @mode: mode of the file to be created in @dir
2936 *
2937 * If the @mode of the new file has both the S_ISGID and S_IXGRP bits
2938 * raised and @dir has the S_ISGID bit raised, ensure that the caller is
2939 * either in the group of the parent directory or has CAP_FSETID in their
2940 * user namespace and is privileged over the parent directory. In all
2941 * other cases, strip the S_ISGID bit from @mode.
2942 *
2943 * Return: the new mode to use for the file
2944 */
2945 umode_t mode_strip_sgid(struct mnt_idmap *idmap,
2946 const struct inode *dir, umode_t mode)
2947 {
2948 if ((mode & (S_ISGID | S_IXGRP)) != (S_ISGID | S_IXGRP))
2949 return mode;
2950 if (S_ISDIR(mode) || !dir || !(dir->i_mode & S_ISGID))
2951 return mode;
2952 if (in_group_or_capable(idmap, dir, i_gid_into_vfsgid(idmap, dir)))
2953 return mode;
2954 return mode & ~S_ISGID;
2955 }
2956 EXPORT_SYMBOL(mode_strip_sgid);
2957
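/*
 * Example (an illustrative sketch only, kept out of the build with
 * "#if 0"; ex_prepare_mode() is not part of this file, and the real
 * callers live in the VFS create paths): how mode_strip_sgid() might
 * be combined with umask handling when computing a new file's mode.
 */
#if 0
static umode_t ex_prepare_mode(struct mnt_idmap *idmap,
			       const struct inode *dir, umode_t mode)
{
	/* Strip S_ISGID unless the caller may legitimately keep it ... */
	mode = mode_strip_sgid(idmap, dir, mode);
	/* ... then apply the caller's umask unless POSIX ACLs apply. */
	if (!IS_POSIXACL(dir))
		mode &= ~current_umask();
	return mode;
}
#endif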