// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"

#include <trace/events/f2fs.h>

void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;

	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}

void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & F2FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (file_is_encrypt(inode))
		new_fl |= S_ENCRYPTED;
	if (file_is_verity(inode))
		new_fl |= S_VERITY;
	if (flags & F2FS_CASEFOLD_FL)
		new_fl |= S_CASEFOLD;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
			S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}

static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
	    S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[extra_size])
			inode->i_rdev = old_decode_dev(
					le32_to_cpu(ri->i_addr[extra_size]));
		else
			inode->i_rdev = new_decode_dev(
					le32_to_cpu(ri->i_addr[extra_size + 1]));
	}
}

static int __written_first_block(struct f2fs_sb_info *sbi,
					struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

	if (!__is_valid_data_blkaddr(addr))
		return 1;
	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE))
		return -EFSCORRUPTED;
	return 0;
}

static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}

static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
	return;
}

static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_sb_has_inode_chksum(sbi))
		return false;

	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				i_inode_checksum))
		return false;

	return true;
}
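
/*
 * Compute the on-disk inode checksum. The seed mixes in the inode number
 * and generation; the checksum then covers the raw inode up to the
 * i_inode_checksum field (with the field itself folded in as zero) and
 * the rest of the node block after it.
 */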
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}

bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
		return true;

#ifdef CONFIG_F2FS_CHECK_FS
	if (!f2fs_enable_inode_chksum(sbi, page))
#else
	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
#endif
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
			  page->index, ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
			  __func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
			  __func__, inode->i_ino,
			  ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (f2fs_sb_has_flexible_inline_xattr(sbi)
			&& !f2fs_has_extra_attr(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
			  __func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
			!f2fs_sb_has_extra_attr(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_extra_isize,
			  F2FS_TOTAL_EXTRA_ATTR_SIZE);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
		f2fs_sb_has_flexible_inline_xattr(sbi) &&
		f2fs_has_inline_xattr(inode) &&
		(!fi->i_inline_xattr_size ||
		fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_inline_xattr_size,
			  MAX_INLINE_XATTR_SIZE);
		return false;
	}

	if (F2FS_I(inode)->extent_tree) {
		struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;

		if (ei->len &&
			(!f2fs_is_valid_blkaddr(sbi, ei->blk,
						DATA_GENERIC_ENHANCE) ||
			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
						DATA_GENERIC_ENHANCE))) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
				  __func__, inode->i_ino,
				  ei->blk, ei->fofs, ei->len);
			return false;
		}
	}

	if (f2fs_has_inline_data(inode) &&
			(!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	return true;
}
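
/*
 * Populate the in-memory inode from its on-disk image: common VFS fields,
 * f2fs-specific fields, and the extra attribute area. The result is
 * validated with sanity_check_inode(); the inline-data flag and the cold
 * bit are recovered where needed. Returns 0 on success or a negative
 * errno.
 */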
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;
	int err;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	if (S_ISREG(inode->i_mode))
		fi->i_flags &= ~F2FS_PROJINHERIT_FL;
	fi->flags = 0;
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	if (f2fs_init_extent_tree(inode, &ri->i_ext))
		set_page_dirty(node_page);

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {

		/*
		 * Previously, inline data and directories always reserved
		 * 200 bytes in the inode layout, even if inline_xattr was
		 * disabled. In order to keep inline_dentry's structure for
		 * backward compatibility, we get the space back only from
		 * inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		return -EFSCORRUPTED;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* try to recover cold bit for non-dir inode */
	if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
		set_cold_node(node_page, false);
		set_page_dirty(node_page);
	}

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	if (S_ISREG(inode->i_mode)) {
		err = __written_first_block(sbi, ri);
		if (err < 0) {
			f2fs_put_page(node_page, 1);
			return err;
		}
		if (!err)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);

	return 0;
}
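
/*
 * Look up, or create and populate, the in-core inode for @ino. The node
 * and meta inodes only need their address space operations; every other
 * inode is read from disk via do_read_inode() and then wired up with
 * inode/file operations according to its mode.
 */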
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		inode_nohighmem(inode);
	} else if (S_ISLNK(inode->i_mode)) {
		if (file_is_encrypt(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);
	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	f2fs_inode_synced(inode);
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
	return inode;
}
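
/*
 * Copy the in-memory inode back into its on-disk image in @node_page.
 * The node page is marked dirty up front; with CONFIG_F2FS_CHECK_FS the
 * inode checksum is refreshed as well.
 */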
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (et) {
		read_lock(&et->lock);
		set_raw_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}
	}

	__set_inode_rdev(inode, ri);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_inline_node(node_page);

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}

void f2fs_update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
retry:
	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);

		if (err == -ENOMEM) {
			cond_resched();
			goto retry;
		} else if (err != -ENOENT) {
			f2fs_stop_checkpoint(sbi, false);
		}
		return;
	}
	f2fs_update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}
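
/*
 * ->write_inode() callback: node/meta inodes and clean inodes are
 * skipped, -ENOSPC is returned when no checkpoint is ready, and
 * otherwise the inode is flushed into its node page.
 */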
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	if (!f2fs_is_checkpoint_ready(sbi))
		return -ENOSPC;

	/*
	 * We need to balance fs here to prevent producing dirty node pages
	 * during the urgent cleaning time when running out of free sections.
	 */
	f2fs_update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	err = dquot_initialize(inode);
	if (err) {
		err = 0;
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
		f2fs_show_injection_info(FAULT_EVICT_INODE);
		err = -EIO;
	}

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT)
			err = 0;
	}

	/* give more chances, if ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err) {
		f2fs_update_inode_page(inode);
		if (dquot_initialize_needed(inode))
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}
	sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	if (likely(!f2fs_cp_error(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* ino == 0, if f2fs_new_inode() failed */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach an error
		 * condition: err && !f2fs_exist_written_data(sbi,
		 * inode->i_ino, ORPHAN_INO). In that case,
		 * f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}
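
/*
 * Undo a half-created inode after a failure in the creation path: drop
 * its link count, sync the inode page, and then either register it as an
 * orphan (if its node block was already allocated) or mark the
 * preallocated nid as free.
 */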
/* caller should call f2fs_lock_op() */
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/*
	 * clear nlink of the inode in order to release its resources
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * we must call this to avoid the inode remaining dirty, which would
	 * result in a panic when flushing dirty inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add the inode to the orphan list before
	 * f2fs_unlock_op(), so that we can prevent losing this orphan when
	 * a checkpoint is followed by a sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "May lose orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}