// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include <linux/lz4.h>
#include <linux/zstd.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"

#include <trace/events/f2fs.h>

#ifdef CONFIG_F2FS_FS_COMPRESSION
extern const struct address_space_operations f2fs_compress_aops;
#endif

void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
        if (is_inode_flag_set(inode, FI_NEW_INODE))
                return;

        if (f2fs_readonly(F2FS_I_SB(inode)->sb))
                return;

        if (f2fs_inode_dirtied(inode, sync))
                return;

        if (f2fs_is_atomic_file(inode)) {
                set_inode_flag(inode, FI_ATOMIC_DIRTIED);
                return;
        }

        mark_inode_dirty_sync(inode);
}

void f2fs_set_inode_flags(struct inode *inode)
{
        unsigned int flags = F2FS_I(inode)->i_flags;
        unsigned int new_fl = 0;

        if (flags & F2FS_SYNC_FL)
                new_fl |= S_SYNC;
        if (flags & F2FS_APPEND_FL)
                new_fl |= S_APPEND;
        if (flags & F2FS_IMMUTABLE_FL)
                new_fl |= S_IMMUTABLE;
        if (flags & F2FS_NOATIME_FL)
                new_fl |= S_NOATIME;
        if (flags & F2FS_DIRSYNC_FL)
                new_fl |= S_DIRSYNC;
        if (file_is_encrypt(inode))
                new_fl |= S_ENCRYPTED;
        if (file_is_verity(inode))
                new_fl |= S_VERITY;
        if (flags & F2FS_CASEFOLD_FL)
                new_fl |= S_CASEFOLD;
        inode_set_flags(inode, new_fl,
                        S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
                        S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}

static void __get_inode_rdev(struct inode *inode, struct page *node_page)
{
        __le32 *addr = get_dnode_addr(inode, node_page);

        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
            S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
                if (addr[0])
                        inode->i_rdev = old_decode_dev(le32_to_cpu(addr[0]));
                else
                        inode->i_rdev = new_decode_dev(le32_to_cpu(addr[1]));
        }
}

static void __set_inode_rdev(struct inode *inode, struct page *node_page)
{
        __le32 *addr = get_dnode_addr(inode, node_page);

        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                if (old_valid_dev(inode->i_rdev)) {
                        addr[0] = cpu_to_le32(old_encode_dev(inode->i_rdev));
                        addr[1] = 0;
                } else {
                        addr[0] = 0;
                        addr[1] = cpu_to_le32(new_encode_dev(inode->i_rdev));
                        addr[2] = 0;
                }
        }
}

static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
        void *inline_data = inline_data_addr(inode, ipage);
        __le32 *start = inline_data;
        __le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

        while (start < end) {
                if (*start++) {
                        f2fs_wait_on_page_writeback(ipage, NODE, true, true);

                        set_inode_flag(inode, FI_DATA_EXIST);
                        set_raw_inline(inode, F2FS_INODE(ipage));
                        set_page_dirty(ipage);
                        return;
                }
        }
}

static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
        struct f2fs_inode *ri = &F2FS_NODE(page)->i;

        if (!f2fs_sb_has_inode_chksum(sbi))
                return false;

        if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
                return false;

        if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
                                i_inode_checksum))
                return false;

        return true;
}

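/*
 * Compute the inode checksum: seed with the inode number and
 * i_generation, then checksum the on-disk inode while treating the
 * i_inode_checksum field itself as zeroes.
 */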
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
        struct f2fs_node *node = F2FS_NODE(page);
        struct f2fs_inode *ri = &node->i;
        __le32 ino = node->footer.ino;
        __le32 gen = ri->i_generation;
        __u32 chksum, chksum_seed;
        __u32 dummy_cs = 0;
        unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
        unsigned int cs_size = sizeof(dummy_cs);

        chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
                             sizeof(ino));
        chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

        chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
        chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
        offset += cs_size;
        chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
                             F2FS_BLKSIZE - offset);
        return chksum;
}

bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
        struct f2fs_inode *ri;
        __u32 provided, calculated;

        if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
                return true;

#ifdef CONFIG_F2FS_CHECK_FS
        if (!f2fs_enable_inode_chksum(sbi, page))
#else
        if (!f2fs_enable_inode_chksum(sbi, page) ||
                        PageDirty(page) ||
                        folio_test_writeback(page_folio(page)))
#endif
                return true;

        ri = &F2FS_NODE(page)->i;
        provided = le32_to_cpu(ri->i_inode_checksum);
        calculated = f2fs_inode_chksum(sbi, page);

        if (provided != calculated)
                f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
                          page_folio(page)->index, ino_of_node(page),
                          provided, calculated);

        return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
        struct f2fs_inode *ri = &F2FS_NODE(page)->i;

        if (!f2fs_enable_inode_chksum(sbi, page))
                return;

        ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

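/*
 * Validate the on-disk compression metadata: algorithm, compressed
 * block count, log of cluster size and the per-algorithm compress
 * level must all be sane before the inode is accepted.
 */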
static bool sanity_check_compress_inode(struct inode *inode,
                                        struct f2fs_inode *ri)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        unsigned char clevel;

        if (ri->i_compress_algorithm >= COMPRESS_MAX) {
                f2fs_warn(sbi,
                        "%s: inode (ino=%lx) has unsupported compress algorithm: %u, run fsck to fix",
                        __func__, inode->i_ino, ri->i_compress_algorithm);
                return false;
        }
        if (le64_to_cpu(ri->i_compr_blocks) >
                        SECTOR_TO_BLOCK(inode->i_blocks)) {
                f2fs_warn(sbi,
                        "%s: inode (ino=%lx) has inconsistent i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
                        __func__, inode->i_ino, le64_to_cpu(ri->i_compr_blocks),
                        SECTOR_TO_BLOCK(inode->i_blocks));
                return false;
        }
        if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
                        ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
                f2fs_warn(sbi,
                        "%s: inode (ino=%lx) has unsupported log cluster size: %u, run fsck to fix",
                        __func__, inode->i_ino, ri->i_log_cluster_size);
                return false;
        }

        clevel = le16_to_cpu(ri->i_compress_flag) >>
                                COMPRESS_LEVEL_OFFSET;
        switch (ri->i_compress_algorithm) {
        case COMPRESS_LZO:
#ifdef CONFIG_F2FS_FS_LZO
                if (clevel)
                        goto err_level;
#endif
                break;
        case COMPRESS_LZORLE:
#ifdef CONFIG_F2FS_FS_LZORLE
                if (clevel)
                        goto err_level;
#endif
                break;
        case COMPRESS_LZ4:
#ifdef CONFIG_F2FS_FS_LZ4
#ifdef CONFIG_F2FS_FS_LZ4HC
                if (clevel &&
                    (clevel < LZ4HC_MIN_CLEVEL || clevel > LZ4HC_MAX_CLEVEL))
                        goto err_level;
#else
                if (clevel)
                        goto err_level;
#endif
#endif
                break;
        case COMPRESS_ZSTD:
#ifdef CONFIG_F2FS_FS_ZSTD
                if (clevel < zstd_min_clevel() || clevel > zstd_max_clevel())
                        goto err_level;
#endif
                break;
        default:
                goto err_level;
        }

        return true;
err_level:
        f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported compress level: %u, run fsck to fix",
                  __func__, inode->i_ino, clevel);
        return false;
}

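/*
 * Cross-check the on-disk inode against the superblock features and
 * reject inconsistent combinations, so a corrupted or fuzzed image is
 * caught before the inode is used.
 */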
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct f2fs_inode *ri = F2FS_INODE(node_page);
        unsigned long long iblocks;

        iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
        if (!iblocks) {
                f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
                          __func__, inode->i_ino, iblocks);
                return false;
        }

        if (ino_of_node(node_page) != nid_of_node(node_page)) {
                f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
                          __func__, inode->i_ino,
                          ino_of_node(node_page), nid_of_node(node_page));
                return false;
        }

        if (f2fs_has_extra_attr(inode)) {
                if (!f2fs_sb_has_extra_attr(sbi)) {
                        f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
                                  __func__, inode->i_ino);
                        return false;
                }
                if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
                                fi->i_extra_isize < F2FS_MIN_EXTRA_ATTR_SIZE ||
                                fi->i_extra_isize % sizeof(__le32)) {
                        f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
                                  __func__, inode->i_ino, fi->i_extra_isize,
                                  F2FS_TOTAL_EXTRA_ATTR_SIZE);
                        return false;
                }
                if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
                                f2fs_has_inline_xattr(inode) &&
                                (!fi->i_inline_xattr_size ||
                                fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
                        f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %lu",
                                  __func__, inode->i_ino, fi->i_inline_xattr_size,
                                  MAX_INLINE_XATTR_SIZE);
                        return false;
                }
                if (f2fs_sb_has_compression(sbi) &&
                                fi->i_flags & F2FS_COMPR_FL &&
                                F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
                                                i_compress_flag)) {
                        if (!sanity_check_compress_inode(inode, ri))
                                return false;
                }
        }

        if (!f2fs_sb_has_extra_attr(sbi)) {
                if (f2fs_sb_has_project_quota(sbi)) {
                        f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
                                  __func__, inode->i_ino, F2FS_FEATURE_PRJQUOTA);
                        return false;
                }
                if (f2fs_sb_has_inode_chksum(sbi)) {
                        f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
                                  __func__, inode->i_ino, F2FS_FEATURE_INODE_CHKSUM);
                        return false;
                }
                if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
                        f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
                                  __func__, inode->i_ino, F2FS_FEATURE_FLEXIBLE_INLINE_XATTR);
                        return false;
                }
                if (f2fs_sb_has_inode_crtime(sbi)) {
                        f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
                                  __func__, inode->i_ino, F2FS_FEATURE_INODE_CRTIME);
                        return false;
                }
                if (f2fs_sb_has_compression(sbi)) {
                        f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
                                  __func__, inode->i_ino, F2FS_FEATURE_COMPRESSION);
                        return false;
                }
        }

        if (f2fs_sanity_check_inline_data(inode, node_page)) {
                f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
                          __func__, inode->i_ino, inode->i_mode);
                return false;
        }

        if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
                f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
                          __func__, inode->i_ino, inode->i_mode);
                return false;
        }

        if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
                f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
                          __func__, inode->i_ino);
                return false;
        }

        if (fi->i_xattr_nid && f2fs_check_nid_range(sbi, fi->i_xattr_nid)) {
                f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_xattr_nid: %u, run fsck to fix.",
                          __func__, inode->i_ino, fi->i_xattr_nid);
                return false;
        }

        if (IS_DEVICE_ALIASING(inode)) {
                if (!f2fs_sb_has_device_alias(sbi)) {
                        f2fs_warn(sbi, "%s: inode (ino=%lx) has device alias flag, but the feature is off",
                                  __func__, inode->i_ino);
                        return false;
                }
                if (!f2fs_is_pinned_file(inode)) {
                        f2fs_warn(sbi, "%s: inode (ino=%lx) has device alias flag, but is not pinned",
                                  __func__, inode->i_ino);
                        return false;
                }
        }

        return true;
}

static void init_idisk_time(struct inode *inode)
{
        struct f2fs_inode_info *fi = F2FS_I(inode);

        fi->i_disk_time[0] = inode_get_atime(inode);
        fi->i_disk_time[1] = inode_get_ctime(inode);
        fi->i_disk_time[2] = inode_get_mtime(inode);
}

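/*
 * Fill the in-core inode from its node page: copy the raw fields,
 * sanity-check them, recover inline data status and the cold bit,
 * and initialize the extent trees.
 */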
static int do_read_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct page *node_page;
        struct f2fs_inode *ri;
        projid_t i_projid;

        /* Check if ino is within scope */
        if (f2fs_check_nid_range(sbi, inode->i_ino))
                return -EINVAL;

        node_page = f2fs_get_node_page(sbi, inode->i_ino);
        if (IS_ERR(node_page))
                return PTR_ERR(node_page);

        ri = F2FS_INODE(node_page);

        inode->i_mode = le16_to_cpu(ri->i_mode);
        i_uid_write(inode, le32_to_cpu(ri->i_uid));
        i_gid_write(inode, le32_to_cpu(ri->i_gid));
        set_nlink(inode, le32_to_cpu(ri->i_links));
        inode->i_size = le64_to_cpu(ri->i_size);
        inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

        inode_set_atime(inode, le64_to_cpu(ri->i_atime),
                        le32_to_cpu(ri->i_atime_nsec));
        inode_set_ctime(inode, le64_to_cpu(ri->i_ctime),
                        le32_to_cpu(ri->i_ctime_nsec));
        inode_set_mtime(inode, le64_to_cpu(ri->i_mtime),
                        le32_to_cpu(ri->i_mtime_nsec));
        inode->i_generation = le32_to_cpu(ri->i_generation);
        if (S_ISDIR(inode->i_mode))
                fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
        else if (S_ISREG(inode->i_mode))
                fi->i_gc_failures = le16_to_cpu(ri->i_gc_failures);
        fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
        fi->i_flags = le32_to_cpu(ri->i_flags);
        if (S_ISREG(inode->i_mode))
                fi->i_flags &= ~F2FS_PROJINHERIT_FL;
        bitmap_zero(fi->flags, FI_MAX);
        fi->i_advise = ri->i_advise;
        fi->i_pino = le32_to_cpu(ri->i_pino);
        fi->i_dir_level = ri->i_dir_level;

        get_inline_info(inode, ri);

        fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
                                le16_to_cpu(ri->i_extra_isize) : 0;

        if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
                fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
        } else if (f2fs_has_inline_xattr(inode) ||
                        f2fs_has_inline_dentry(inode)) {
                fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
        } else {

                /*
                 * Previous inline data or directory always reserved 200 bytes
                 * in inode layout, even if inline_xattr is disabled. In order
                 * to keep inline_dentry's structure for backward compatibility,
                 * we get the space back only from inline_data.
                 */
                fi->i_inline_xattr_size = 0;
        }

        if (!sanity_check_inode(inode, node_page)) {
                f2fs_put_page(node_page, 1);
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
                return -EFSCORRUPTED;
        }

        /* check data exist */
        if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
                __recover_inline_status(inode, node_page);

        /* try to recover cold bit for non-dir inode */
        if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
                f2fs_wait_on_page_writeback(node_page, NODE, true, true);
                set_cold_node(node_page, false);
                set_page_dirty(node_page);
        }

        /* get rdev by using inline_info */
        __get_inode_rdev(inode, node_page);

        if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
                fi->last_disk_size = inode->i_size;

        if (fi->i_flags & F2FS_PROJINHERIT_FL)
                set_inode_flag(inode, FI_PROJ_INHERIT);

        if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
                        F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
                i_projid = (projid_t)le32_to_cpu(ri->i_projid);
        else
                i_projid = F2FS_DEF_PROJID;
        fi->i_projid = make_kprojid(&init_user_ns, i_projid);

        if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
                        F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
                fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
                fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
        }

        if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
                                        (fi->i_flags & F2FS_COMPR_FL)) {
                if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
                                        i_compress_flag)) {
                        unsigned short compress_flag;

                        atomic_set(&fi->i_compr_blocks,
                                        le64_to_cpu(ri->i_compr_blocks));
                        fi->i_compress_algorithm = ri->i_compress_algorithm;
                        fi->i_log_cluster_size = ri->i_log_cluster_size;
                        compress_flag = le16_to_cpu(ri->i_compress_flag);
                        fi->i_compress_level = compress_flag >>
                                                COMPRESS_LEVEL_OFFSET;
                        fi->i_compress_flag = compress_flag &
                                        GENMASK(COMPRESS_LEVEL_OFFSET - 1, 0);
                        fi->i_cluster_size = BIT(fi->i_log_cluster_size);
                        set_inode_flag(inode, FI_COMPRESSED_FILE);
                }
        }

        init_idisk_time(inode);

        if (!sanity_check_extent_cache(inode, node_page)) {
                f2fs_put_page(node_page, 1);
                f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
                return -EFSCORRUPTED;
        }

        /* Need all the flag bits */
        f2fs_init_read_extent_tree(inode, node_page);
        f2fs_init_age_extent_tree(inode);

        f2fs_put_page(node_page, 1);

        stat_inc_inline_xattr(inode);
        stat_inc_inline_inode(inode);
        stat_inc_inline_dir(inode);
        stat_inc_compr_inode(inode);
        stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks));

        return 0;
}

static bool is_meta_ino(struct f2fs_sb_info *sbi, unsigned int ino)
{
        return ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi) ||
                ino == F2FS_COMPRESS_INO(sbi);
}

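/*
 * Find or build the in-core inode for @ino: meta inodes only get
 * their mapping set up, while other inodes are read from disk and
 * wired to the matching inode/file/address_space operations.
 */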
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct inode *inode;
        int ret = 0;

        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);

        if (!(inode->i_state & I_NEW)) {
                if (is_meta_ino(sbi, ino)) {
                        f2fs_err(sbi, "inaccessible inode: %lu, run fsck to repair", ino);
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
                        ret = -EFSCORRUPTED;
                        trace_f2fs_iget_exit(inode, ret);
                        iput(inode);
                        f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
                        return ERR_PTR(ret);
                }

                trace_f2fs_iget(inode);
                return inode;
        }

        if (is_meta_ino(sbi, ino))
                goto make_now;

        ret = do_read_inode(inode);
        if (ret)
                goto bad_inode;
make_now:
        if (ino == F2FS_NODE_INO(sbi)) {
                inode->i_mapping->a_ops = &f2fs_node_aops;
                mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
        } else if (ino == F2FS_META_INO(sbi)) {
                inode->i_mapping->a_ops = &f2fs_meta_aops;
                mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
        } else if (ino == F2FS_COMPRESS_INO(sbi)) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
                inode->i_mapping->a_ops = &f2fs_compress_aops;
                /*
                 * generic_error_remove_folio only truncates pages of regular
                 * inode
                 */
                inode->i_mode |= S_IFREG;
#endif
                mapping_set_gfp_mask(inode->i_mapping,
                                GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
        } else if (S_ISREG(inode->i_mode)) {
                inode->i_op = &f2fs_file_inode_operations;
                inode->i_fop = &f2fs_file_operations;
                inode->i_mapping->a_ops = &f2fs_dblock_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &f2fs_dir_inode_operations;
                inode->i_fop = &f2fs_dir_operations;
                inode->i_mapping->a_ops = &f2fs_dblock_aops;
                mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
        } else if (S_ISLNK(inode->i_mode)) {
                if (file_is_encrypt(inode))
                        inode->i_op = &f2fs_encrypted_symlink_inode_operations;
                else
                        inode->i_op = &f2fs_symlink_inode_operations;
                inode_nohighmem(inode);
                inode->i_mapping->a_ops = &f2fs_dblock_aops;
        } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
                        S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
                inode->i_op = &f2fs_special_inode_operations;
                init_special_inode(inode, inode->i_mode, inode->i_rdev);
        } else {
                ret = -EIO;
                goto bad_inode;
        }
        f2fs_set_inode_flags(inode);

        unlock_new_inode(inode);
        trace_f2fs_iget(inode);
        return inode;

bad_inode:
        f2fs_inode_synced(inode);
        iget_failed(inode);
        trace_f2fs_iget_exit(inode, ret);
        return ERR_PTR(ret);
}

struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
        struct inode *inode;
retry:
        inode = f2fs_iget(sb, ino);
        if (IS_ERR(inode)) {
                if (PTR_ERR(inode) == -ENOMEM) {
                        memalloc_retry_wait(GFP_NOFS);
                        goto retry;
                }
        }
        return inode;
}

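/*
 * Copy the in-core inode state back into the raw inode inside
 * @node_page and dirty the page, so the next node writeback persists
 * the update.
 */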
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct f2fs_inode *ri;
        struct extent_tree *et = fi->extent_tree[EX_READ];

        f2fs_wait_on_page_writeback(node_page, NODE, true, true);
        set_page_dirty(node_page);

        f2fs_inode_synced(inode);

        ri = F2FS_INODE(node_page);

        ri->i_mode = cpu_to_le16(inode->i_mode);
        ri->i_advise = fi->i_advise;
        ri->i_uid = cpu_to_le32(i_uid_read(inode));
        ri->i_gid = cpu_to_le32(i_gid_read(inode));
        ri->i_links = cpu_to_le32(inode->i_nlink);
        ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

        if (!f2fs_is_atomic_file(inode) ||
                        is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
                ri->i_size = cpu_to_le64(i_size_read(inode));

        if (et) {
                read_lock(&et->lock);
                set_raw_read_extent(&et->largest, &ri->i_ext);
                read_unlock(&et->lock);
        } else {
                memset(&ri->i_ext, 0, sizeof(ri->i_ext));
        }
        set_raw_inline(inode, ri);

        ri->i_atime = cpu_to_le64(inode_get_atime_sec(inode));
        ri->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
        ri->i_mtime = cpu_to_le64(inode_get_mtime_sec(inode));
        ri->i_atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode));
        ri->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
        ri->i_mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
        if (S_ISDIR(inode->i_mode))
                ri->i_current_depth = cpu_to_le32(fi->i_current_depth);
        else if (S_ISREG(inode->i_mode))
                ri->i_gc_failures = cpu_to_le16(fi->i_gc_failures);
        ri->i_xattr_nid = cpu_to_le32(fi->i_xattr_nid);
        ri->i_flags = cpu_to_le32(fi->i_flags);
        ri->i_pino = cpu_to_le32(fi->i_pino);
        ri->i_generation = cpu_to_le32(inode->i_generation);
        ri->i_dir_level = fi->i_dir_level;

        if (f2fs_has_extra_attr(inode)) {
                ri->i_extra_isize = cpu_to_le16(fi->i_extra_isize);

                if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
                        ri->i_inline_xattr_size =
                                cpu_to_le16(fi->i_inline_xattr_size);

                if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
                        F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid)) {
                        projid_t i_projid;

                        i_projid = from_kprojid(&init_user_ns, fi->i_projid);
                        ri->i_projid = cpu_to_le32(i_projid);
                }

                if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
                        F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
                        ri->i_crtime = cpu_to_le64(fi->i_crtime.tv_sec);
                        ri->i_crtime_nsec = cpu_to_le32(fi->i_crtime.tv_nsec);
                }

                if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
                        F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
                                                        i_compress_flag)) {
                        unsigned short compress_flag;

                        ri->i_compr_blocks = cpu_to_le64(
                                        atomic_read(&fi->i_compr_blocks));
                        ri->i_compress_algorithm = fi->i_compress_algorithm;
                        compress_flag = fi->i_compress_flag |
                                        fi->i_compress_level <<
                                                COMPRESS_LEVEL_OFFSET;
                        ri->i_compress_flag = cpu_to_le16(compress_flag);
                        ri->i_log_cluster_size = fi->i_log_cluster_size;
                }
        }

        __set_inode_rdev(inode, node_page);

        /* deleted inode */
        if (inode->i_nlink == 0)
                clear_page_private_inline(node_page);

        init_idisk_time(inode);
#ifdef CONFIG_F2FS_CHECK_FS
        f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}

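/*
 * Write the in-core inode back through its node page: -ENOENT means
 * the node block was truncated, -ENOMEM is retried indefinitely, and
 * other errors are retried a bounded number of times before
 * checkpointing is stopped.
 */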
void f2fs_update_inode_page(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct page *node_page;
        int count = 0;
retry:
        node_page = f2fs_get_node_page(sbi, inode->i_ino);
        if (IS_ERR(node_page)) {
                int err = PTR_ERR(node_page);

                /* The node block was truncated. */
                if (err == -ENOENT)
                        return;

                if (err == -ENOMEM || ++count <= DEFAULT_RETRY_IO_COUNT)
                        goto retry;
                f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_UPDATE_INODE);
                return;
        }
        f2fs_update_inode(inode, node_page);
        f2fs_put_page(node_page, 1);
}

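/*
 * ->write_inode() callback: meta inodes and clean inodes with
 * consistent timestamps are skipped; everything else is written back
 * through the node page, failing with -ENOSPC when no checkpoint is
 * ready.
 */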
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

        if (inode->i_ino == F2FS_NODE_INO(sbi) ||
                        inode->i_ino == F2FS_META_INO(sbi))
                return 0;

        /*
         * atime could be updated without dirtying the f2fs inode in
         * lazytime mode
         */
        if (f2fs_is_time_consistent(inode) &&
                        !is_inode_flag_set(inode, FI_DIRTY_INODE))
                return 0;

        if (!f2fs_is_checkpoint_ready(sbi)) {
                f2fs_mark_inode_dirty_sync(inode, true);
                return -ENOSPC;
        }

        /*
         * Balance the fs here to avoid producing dirty node pages
         * during urgent cleaning when we are running out of free
         * sections.
         */
        f2fs_update_inode_page(inode);
        if (wbc && wbc->nr_to_write)
                f2fs_balance_fs(sbi, true);
        return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        nid_t xnid = fi->i_xattr_nid;
        int err = 0;
        bool freeze_protected = false;

        f2fs_abort_atomic_write(inode, true);

        if (fi->cow_inode && f2fs_is_cow_file(fi->cow_inode)) {
                clear_inode_flag(fi->cow_inode, FI_COW_FILE);
                F2FS_I(fi->cow_inode)->atomic_inode = NULL;
                iput(fi->cow_inode);
                fi->cow_inode = NULL;
        }

        trace_f2fs_evict_inode(inode);
        truncate_inode_pages_final(&inode->i_data);

        if ((inode->i_nlink || is_bad_inode(inode)) &&
                test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
                f2fs_invalidate_compress_pages(sbi, inode->i_ino);

        if (inode->i_ino == F2FS_NODE_INO(sbi) ||
                        inode->i_ino == F2FS_META_INO(sbi) ||
                        inode->i_ino == F2FS_COMPRESS_INO(sbi))
                goto out_clear;

        f2fs_bug_on(sbi, get_dirty_pages(inode));
        f2fs_remove_dirty_inode(inode);

        if (!IS_DEVICE_ALIASING(inode))
                f2fs_destroy_extent_tree(inode);

        if (inode->i_nlink || is_bad_inode(inode))
                goto no_delete;

        err = f2fs_dquot_initialize(inode);
        if (err) {
                err = 0;
                set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
        }

        f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
        f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
        f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

        if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING)) {
                sb_start_intwrite(inode->i_sb);
                freeze_protected = true;
        }
        set_inode_flag(inode, FI_NO_ALLOC);
        i_size_write(inode, 0);
retry:
        if (F2FS_HAS_BLOCKS(inode))
                err = f2fs_truncate(inode);

        if (time_to_inject(sbi, FAULT_EVICT_INODE))
                err = -EIO;

        if (!err) {
                f2fs_lock_op(sbi);
                err = f2fs_remove_inode_page(inode);
                f2fs_unlock_op(sbi);
                if (err == -ENOENT) {
                        err = 0;

                        /*
                         * In a fuzzed image, another node may have the same
                         * block address as this inode's; if that node was
                         * truncated previously, truncation of the inode node
                         * will fail.
                         */
                        if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
                                f2fs_warn(F2FS_I_SB(inode),
                                        "f2fs_evict_inode: inconsistent node id, ino:%lu",
                                        inode->i_ino);
                                f2fs_inode_synced(inode);
                                set_sbi_flag(sbi, SBI_NEED_FSCK);
                        }
                }
        }

        /* give more chances, if ENOMEM case */
        if (err == -ENOMEM) {
                err = 0;
                goto retry;
        }

        if (IS_DEVICE_ALIASING(inode))
                f2fs_destroy_extent_tree(inode);

        if (err) {
                f2fs_update_inode_page(inode);
                if (dquot_initialize_needed(inode))
                        set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
        }
        if (freeze_protected)
                sb_end_intwrite(inode->i_sb);
no_delete:
        dquot_drop(inode);

        stat_dec_inline_xattr(inode);
        stat_dec_inline_dir(inode);
        stat_dec_inline_inode(inode);
        stat_dec_compr_inode(inode);
        stat_sub_compr_blocks(inode,
                        atomic_read(&fi->i_compr_blocks));

        if (likely(!f2fs_cp_error(sbi) &&
                !is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
                f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
        else
                f2fs_inode_synced(inode);

        /* for the case f2fs_new_inode() was failed, .i_ino is zero, skip it */
        if (inode->i_ino)
                invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
                                                        inode->i_ino);
        if (xnid)
                invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
        if (inode->i_nlink) {
                if (is_inode_flag_set(inode, FI_APPEND_WRITE))
                        f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
                if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
                        f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
        }
        if (is_inode_flag_set(inode, FI_FREE_NID)) {
                f2fs_alloc_nid_failed(sbi, inode->i_ino);
                clear_inode_flag(inode, FI_FREE_NID);
        } else {
                /*
                 * If the xattr nid is corrupted, we can reach an error
                 * condition here:
                 * err && !f2fs_exist_written_data(sbi, inode->i_ino, ORPHAN_INO).
                 * In that case, f2fs_check_nid_range() is enough to give a clue.
                 */
        }
out_clear:
        fscrypt_put_encryption_info(inode);
        fsverity_cleanup_inode(inode);
        clear_inode(inode);
}

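/*
 * Tear down a partially created inode after a failed creation path:
 * drop its link count, sync the node page, then either register it as
 * an orphan or give back the preallocated nid.
 */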
/* caller should call f2fs_lock_op() */
void f2fs_handle_failed_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct node_info ni;
        int err;

        /*
         * clear nlink of the inode in order to release its resources
         * immediately.
         */
        clear_nlink(inode);

        /*
         * we must call this to avoid the inode remaining dirty, which would
         * result in a panic when flushing dirty inodes in gdirty_list.
         */
        f2fs_update_inode_page(inode);
        f2fs_inode_synced(inode);

        /* don't make a bad inode, since it becomes a regular file. */
        unlock_new_inode(inode);

        /*
         * Note: we should add the inode to the orphan list before
         * f2fs_unlock_op(), so that we do not lose this orphan when a
         * checkpoint is followed by a sudden power-off.
         */
        err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
        if (err) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                set_inode_flag(inode, FI_FREE_NID);
                f2fs_warn(sbi, "May lose orphan inode, run fsck to fix.");
                goto out;
        }

        if (ni.blk_addr != NULL_ADDR) {
                err = f2fs_acquire_orphan_inode(sbi);
                if (err) {
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
                        f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
                } else {
                        f2fs_add_orphan_inode(inode);
                }
                f2fs_alloc_nid_done(sbi, inode->i_ino);
        } else {
                set_inode_flag(inode, FI_FREE_NID);
        }

out:
        f2fs_unlock_op(sbi);

        /* iput will drop the inode object */
        iput(inode);
}