// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>
#include <linux/zstd.h>
#include <linux/lz4.h>
#include <linux/ctype.h>
#include <linux/fs_parser.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "iostat.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;

#ifdef CONFIG_F2FS_FAULT_INJECTION

const char *f2fs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC] = "kmalloc",
	[FAULT_KVMALLOC] = "kvmalloc",
	[FAULT_PAGE_ALLOC] = "page alloc",
	[FAULT_PAGE_GET] = "page get",
	[FAULT_ALLOC_BIO] = "alloc bio(obsolete)",
	[FAULT_ALLOC_NID] = "alloc nid",
	[FAULT_ORPHAN] = "orphan",
	[FAULT_BLOCK] = "no more block",
	[FAULT_DIR_DEPTH] = "too big dir depth",
	[FAULT_EVICT_INODE] = "evict_inode fail",
	[FAULT_TRUNCATE] = "truncate fail",
	[FAULT_READ_IO] = "read IO error",
	[FAULT_CHECKPOINT] = "checkpoint error",
	[FAULT_DISCARD] = "discard error",
	[FAULT_WRITE_IO] = "write IO error",
	[FAULT_SLAB_ALLOC] = "slab alloc",
	[FAULT_DQUOT_INIT] = "dquot initialize",
	[FAULT_LOCK_OP] = "lock_op",
	[FAULT_BLKADDR_VALIDITY] = "invalid blkaddr",
	[FAULT_BLKADDR_CONSISTENCE] = "inconsistent blkaddr",
	[FAULT_NO_SEGMENT] = "no free segment",
	[FAULT_INCONSISTENT_FOOTER] = "inconsistent footer",
	[FAULT_ATOMIC_TIMEOUT] = "atomic timeout",
	[FAULT_VMALLOC] = "vmalloc",
	[FAULT_LOCK_TIMEOUT] = "lock timeout",
	[FAULT_SKIP_WRITE] = "skip write",
};

int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
				unsigned long type, enum fault_option fo)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (fo & FAULT_ALL) {
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
		return 0;
	}

	if (fo & FAULT_RATE) {
		if (rate > INT_MAX)
			return -EINVAL;
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = (int)rate;
		f2fs_info(sbi, "build fault injection rate: %lu", rate);
	}

	if (fo & FAULT_TYPE) {
		if (type >= BIT(FAULT_MAX))
			return -EINVAL;
		ffi->inject_type = (unsigned int)type;
		f2fs_info(sbi, "build fault injection type: 0x%lx", type);
	}

	if (fo & FAULT_TIMEOUT) {
		if (type >= TIMEOUT_TYPE_MAX)
			return -EINVAL;
		ffi->inject_lock_timeout = (unsigned int)type;
		f2fs_info(sbi, "build fault timeout injection type: 0x%lx", type);
	}

	return 0;
}
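
/*
 * Illustrative sketch (editor's example, not part of f2fs): one way a
 * caller could drive f2fs_build_fault_attr() above to make roughly one
 * in 1000 injectable sites fail, restricted to kmalloc failures.  The
 * helper name below is hypothetical and nothing in f2fs calls it.
 */
static int __maybe_unused f2fs_fault_attr_example(struct f2fs_sb_info *sbi)
{
	/* FAULT_RATE consumes @rate; @type is ignored on this call */
	int err = f2fs_build_fault_attr(sbi, 1000, 0, FAULT_RATE);

	if (err)
		return err;
	/* FAULT_TYPE consumes @type, a bitmask of fault indexes */
	return f2fs_build_fault_attr(sbi, 0, BIT(FAULT_KMALLOC), FAULT_TYPE);
}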

static void inject_timeout(struct f2fs_sb_info *sbi)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
	enum f2fs_timeout_type type = ffi->inject_lock_timeout;
	unsigned long start_time = jiffies;
	unsigned long timeout = HZ;

	switch (type) {
	case TIMEOUT_TYPE_RUNNING:
		while (!time_after(jiffies, start_time + timeout)) {
			if (fatal_signal_pending(current))
				return;
		}
		break;
	case TIMEOUT_TYPE_IO_SLEEP:
		f2fs_schedule_timeout_killable(timeout, true);
		break;
	case TIMEOUT_TYPE_NONIO_SLEEP:
		f2fs_schedule_timeout_killable(timeout, false);
		break;
	case TIMEOUT_TYPE_RUNNABLE:
		while (!time_after(jiffies, start_time + timeout)) {
			if (fatal_signal_pending(current))
				return;
			schedule();
		}
		break;
	default:
		return;
	}
}

void f2fs_simulate_lock_timeout(struct f2fs_sb_info *sbi)
{
	struct f2fs_lock_context lc;

	f2fs_lock_op(sbi, &lc);
	inject_timeout(sbi);
	f2fs_unlock_op(sbi, &lc);
}
#endif

/* f2fs-wide shrinker description */
static struct shrinker *f2fs_shrinker_info;

static int __init f2fs_init_shrinker(void)
{
	f2fs_shrinker_info = shrinker_alloc(0, "f2fs-shrinker");
	if (!f2fs_shrinker_info)
		return -ENOMEM;

	f2fs_shrinker_info->count_objects = f2fs_shrink_count;
	f2fs_shrinker_info->scan_objects = f2fs_shrink_scan;

	shrinker_register(f2fs_shrinker_info);

	return 0;
}

static void f2fs_exit_shrinker(void)
{
	shrinker_free(f2fs_shrinker_info);
}

enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_acl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_inline_xattr_size,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_flush_merge,
	Opt_barrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_data_flush,
	Opt_reserve_root,
	Opt_reserve_node,
	Opt_resgid,
	Opt_resuid,
	Opt_mode,
	Opt_fault_injection,
	Opt_fault_type,
	Opt_lazytime,
	Opt_quota,
	Opt_usrquota,
	Opt_grpquota,
	Opt_prjquota,
	Opt_usrjquota,
	Opt_grpjquota,
	Opt_prjjquota,
	Opt_alloc,
	Opt_fsync,
	Opt_test_dummy_encryption,
	Opt_inlinecrypt,
	Opt_checkpoint_disable,
	Opt_checkpoint_disable_cap,
	Opt_checkpoint_disable_cap_perc,
	Opt_checkpoint_enable,
	Opt_checkpoint_merge,
	Opt_compress_algorithm,
	Opt_compress_log_size,
	Opt_nocompress_extension,
	Opt_compress_extension,
	Opt_compress_chksum,
	Opt_compress_mode,
	Opt_compress_cache,
	Opt_atgc,
	Opt_gc_merge,
	Opt_discard_unit,
	Opt_memory_mode,
	Opt_age_extent_cache,
	Opt_errors,
	Opt_nat_bits,
	Opt_jqfmt,
	Opt_checkpoint,
	Opt_lookup_mode,
	Opt_err,
};

static const struct constant_table f2fs_param_background_gc[] = {
	{"on", BGGC_MODE_ON},
	{"off", BGGC_MODE_OFF},
	{"sync", BGGC_MODE_SYNC},
	{}
};

static const struct constant_table f2fs_param_mode[] = {
	{"adaptive", FS_MODE_ADAPTIVE},
	{"lfs", FS_MODE_LFS},
	{"fragment:segment", FS_MODE_FRAGMENT_SEG},
	{"fragment:block", FS_MODE_FRAGMENT_BLK},
	{}
};

static const struct constant_table f2fs_param_jqfmt[] = {
	{"vfsold", QFMT_VFS_OLD},
	{"vfsv0", QFMT_VFS_V0},
	{"vfsv1", QFMT_VFS_V1},
	{}
};

static const struct constant_table f2fs_param_alloc_mode[] = {
	{"default", ALLOC_MODE_DEFAULT},
	{"reuse", ALLOC_MODE_REUSE},
	{}
};
static const struct constant_table f2fs_param_fsync_mode[] = {
	{"posix", FSYNC_MODE_POSIX},
	{"strict", FSYNC_MODE_STRICT},
	{"nobarrier", FSYNC_MODE_NOBARRIER},
	{}
};

static const struct constant_table f2fs_param_compress_mode[] = {
	{"fs", COMPR_MODE_FS},
	{"user", COMPR_MODE_USER},
	{}
};

static const struct constant_table f2fs_param_discard_unit[] = {
	{"block", DISCARD_UNIT_BLOCK},
	{"segment", DISCARD_UNIT_SEGMENT},
	{"section", DISCARD_UNIT_SECTION},
	{}
};

static const struct constant_table f2fs_param_memory_mode[] = {
	{"normal", MEMORY_MODE_NORMAL},
	{"low", MEMORY_MODE_LOW},
	{}
};

static const struct constant_table f2fs_param_errors[] = {
	{"remount-ro", MOUNT_ERRORS_READONLY},
	{"continue", MOUNT_ERRORS_CONTINUE},
	{"panic", MOUNT_ERRORS_PANIC},
	{}
};

static const struct constant_table f2fs_param_lookup_mode[] = {
	{"perf", LOOKUP_PERF},
	{"compat", LOOKUP_COMPAT},
	{"auto", LOOKUP_AUTO},
	{}
};

static const struct fs_parameter_spec f2fs_param_specs[] = {
	fsparam_enum("background_gc", Opt_gc_background, f2fs_param_background_gc),
	fsparam_flag("disable_roll_forward", Opt_disable_roll_forward),
	fsparam_flag("norecovery", Opt_norecovery),
	fsparam_flag_no("discard", Opt_discard),
	fsparam_flag("no_heap", Opt_noheap),
	fsparam_flag("heap", Opt_heap),
	fsparam_flag_no("user_xattr", Opt_user_xattr),
	fsparam_flag_no("acl", Opt_acl),
	fsparam_s32("active_logs", Opt_active_logs),
	fsparam_flag("disable_ext_identify", Opt_disable_ext_identify),
	fsparam_flag_no("inline_xattr", Opt_inline_xattr),
	fsparam_s32("inline_xattr_size", Opt_inline_xattr_size),
	fsparam_flag_no("inline_data", Opt_inline_data),
	fsparam_flag_no("inline_dentry", Opt_inline_dentry),
	fsparam_flag_no("flush_merge", Opt_flush_merge),
	fsparam_flag_no("barrier", Opt_barrier),
	fsparam_flag("fastboot", Opt_fastboot),
	fsparam_flag_no("extent_cache", Opt_extent_cache),
	fsparam_flag("data_flush", Opt_data_flush),
	fsparam_u32("reserve_root", Opt_reserve_root),
	fsparam_u32("reserve_node", Opt_reserve_node),
	fsparam_gid("resgid", Opt_resgid),
	fsparam_uid("resuid", Opt_resuid),
	fsparam_enum("mode", Opt_mode, f2fs_param_mode),
	fsparam_s32("fault_injection", Opt_fault_injection),
	fsparam_u32("fault_type", Opt_fault_type),
	fsparam_flag_no("lazytime", Opt_lazytime),
	fsparam_flag_no("quota", Opt_quota),
	fsparam_flag("usrquota", Opt_usrquota),
	fsparam_flag("grpquota", Opt_grpquota),
	fsparam_flag("prjquota", Opt_prjquota),
	fsparam_string("usrjquota", Opt_usrjquota),
	fsparam_flag("usrjquota", Opt_usrjquota),
	fsparam_string("grpjquota", Opt_grpjquota),
	fsparam_flag("grpjquota", Opt_grpjquota),
	fsparam_string("prjjquota", Opt_prjjquota),
	fsparam_flag("prjjquota", Opt_prjjquota),
	fsparam_flag("nat_bits", Opt_nat_bits),
	fsparam_enum("jqfmt", Opt_jqfmt, f2fs_param_jqfmt),
	fsparam_enum("alloc_mode", Opt_alloc, f2fs_param_alloc_mode),
	fsparam_enum("fsync_mode", Opt_fsync, f2fs_param_fsync_mode),
	fsparam_string("test_dummy_encryption", Opt_test_dummy_encryption),
	fsparam_flag("test_dummy_encryption", Opt_test_dummy_encryption),
	fsparam_flag("inlinecrypt", Opt_inlinecrypt),
	fsparam_string("checkpoint", Opt_checkpoint),
	fsparam_flag_no("checkpoint_merge", Opt_checkpoint_merge),
	fsparam_string("compress_algorithm", Opt_compress_algorithm),
	fsparam_u32("compress_log_size", Opt_compress_log_size),
	fsparam_string("compress_extension", Opt_compress_extension),
	fsparam_string("nocompress_extension", Opt_nocompress_extension),
	fsparam_flag("compress_chksum", Opt_compress_chksum),
fsparam_enum("compress_mode", Opt_compress_mode, f2fs_param_compress_mode), 360 fsparam_flag("compress_cache", Opt_compress_cache), 361 fsparam_flag("atgc", Opt_atgc), 362 fsparam_flag_no("gc_merge", Opt_gc_merge), 363 fsparam_enum("discard_unit", Opt_discard_unit, f2fs_param_discard_unit), 364 fsparam_enum("memory", Opt_memory_mode, f2fs_param_memory_mode), 365 fsparam_flag("age_extent_cache", Opt_age_extent_cache), 366 fsparam_enum("errors", Opt_errors, f2fs_param_errors), 367 fsparam_enum("lookup_mode", Opt_lookup_mode, f2fs_param_lookup_mode), 368 {} 369 }; 370 371 /* Resort to a match_table for this interestingly formatted option */ 372 static match_table_t f2fs_checkpoint_tokens = { 373 {Opt_checkpoint_disable, "disable"}, 374 {Opt_checkpoint_disable_cap, "disable:%u"}, 375 {Opt_checkpoint_disable_cap_perc, "disable:%u%%"}, 376 {Opt_checkpoint_enable, "enable"}, 377 {Opt_err, NULL}, 378 }; 379 380 #define F2FS_SPEC_background_gc (1 << 0) 381 #define F2FS_SPEC_inline_xattr_size (1 << 1) 382 #define F2FS_SPEC_active_logs (1 << 2) 383 #define F2FS_SPEC_reserve_root (1 << 3) 384 #define F2FS_SPEC_resgid (1 << 4) 385 #define F2FS_SPEC_resuid (1 << 5) 386 #define F2FS_SPEC_mode (1 << 6) 387 #define F2FS_SPEC_fault_injection (1 << 7) 388 #define F2FS_SPEC_fault_type (1 << 8) 389 #define F2FS_SPEC_jqfmt (1 << 9) 390 #define F2FS_SPEC_alloc_mode (1 << 10) 391 #define F2FS_SPEC_fsync_mode (1 << 11) 392 #define F2FS_SPEC_checkpoint_disable_cap (1 << 12) 393 #define F2FS_SPEC_checkpoint_disable_cap_perc (1 << 13) 394 #define F2FS_SPEC_compress_level (1 << 14) 395 #define F2FS_SPEC_compress_algorithm (1 << 15) 396 #define F2FS_SPEC_compress_log_size (1 << 16) 397 #define F2FS_SPEC_compress_extension (1 << 17) 398 #define F2FS_SPEC_nocompress_extension (1 << 18) 399 #define F2FS_SPEC_compress_chksum (1 << 19) 400 #define F2FS_SPEC_compress_mode (1 << 20) 401 #define F2FS_SPEC_discard_unit (1 << 21) 402 #define F2FS_SPEC_memory_mode (1 << 22) 403 #define F2FS_SPEC_errors (1 << 23) 404 #define F2FS_SPEC_lookup_mode (1 << 24) 405 #define F2FS_SPEC_reserve_node (1 << 25) 406 407 struct f2fs_fs_context { 408 struct f2fs_mount_info info; 409 unsigned long long opt_mask; /* Bits changed */ 410 unsigned int spec_mask; 411 unsigned short qname_mask; 412 }; 413 414 #define F2FS_CTX_INFO(ctx) ((ctx)->info) 415 416 static inline void ctx_set_opt(struct f2fs_fs_context *ctx, 417 enum f2fs_mount_opt flag) 418 { 419 ctx->info.opt |= BIT(flag); 420 ctx->opt_mask |= BIT(flag); 421 } 422 423 static inline void ctx_clear_opt(struct f2fs_fs_context *ctx, 424 enum f2fs_mount_opt flag) 425 { 426 ctx->info.opt &= ~BIT(flag); 427 ctx->opt_mask |= BIT(flag); 428 } 429 430 static inline bool ctx_test_opt(struct f2fs_fs_context *ctx, 431 enum f2fs_mount_opt flag) 432 { 433 return ctx->info.opt & BIT(flag); 434 } 435 436 void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate, 437 const char *fmt, ...) 

void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate,
		 const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int level;

	va_start(args, fmt);

	level = printk_get_level(fmt);
	vaf.fmt = printk_skip_level(fmt);
	vaf.va = &args;
	if (limit_rate) {
		if (sbi)
			printk_ratelimited("%c%cF2FS-fs (%s): %pV\n",
				KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
		else
			printk_ratelimited("%c%cF2FS-fs: %pV\n",
				KERN_SOH_ASCII, level, &vaf);
	} else {
		if (sbi)
			printk("%c%cF2FS-fs (%s): %pV\n",
				KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
		else
			printk("%c%cF2FS-fs: %pV\n",
				KERN_SOH_ASCII, level, &vaf);
	}

	va_end(args);
}

#if IS_ENABLED(CONFIG_UNICODE)
static const struct f2fs_sb_encodings {
	__u16 magic;
	char *name;
	unsigned int version;
} f2fs_sb_encoding_map[] = {
	{F2FS_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)},
};

static const struct f2fs_sb_encodings *
f2fs_sb_read_encoding(const struct f2fs_super_block *sb)
{
	__u16 magic = le16_to_cpu(sb->s_encoding);
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
		if (magic == f2fs_sb_encoding_map[i].magic)
			return &f2fs_sb_encoding_map[i];

	return NULL;
}

struct kmem_cache *f2fs_cf_name_slab;
static int __init f2fs_create_casefold_cache(void)
{
	f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
						   F2FS_NAME_LEN);
	return f2fs_cf_name_slab ? 0 : -ENOMEM;
}

static void f2fs_destroy_casefold_cache(void)
{
	kmem_cache_destroy(f2fs_cf_name_slab);
}
#else
static int __init f2fs_create_casefold_cache(void) { return 0; }
static void f2fs_destroy_casefold_cache(void) { }
#endif

static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
	block_t block_limit = min((sbi->user_block_count >> 3),
			sbi->user_block_count - sbi->reserved_blocks);
	block_t node_limit = sbi->total_node_count >> 3;

	/* limit is 12.5% */
	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > block_limit) {
		F2FS_OPTION(sbi).root_reserved_blocks = block_limit;
		f2fs_info(sbi, "Reduce reserved blocks for root = %u",
			  F2FS_OPTION(sbi).root_reserved_blocks);
	}
	if (test_opt(sbi, RESERVE_NODE) &&
			F2FS_OPTION(sbi).root_reserved_nodes > node_limit) {
		F2FS_OPTION(sbi).root_reserved_nodes = node_limit;
		f2fs_info(sbi, "Reduce reserved nodes for root = %u",
			  F2FS_OPTION(sbi).root_reserved_nodes);
	}
	if (!test_opt(sbi, RESERVE_ROOT) && !test_opt(sbi, RESERVE_NODE) &&
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root and reserve_node",
			  from_kuid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resuid),
			  from_kgid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resgid));
}

static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
{
	if (!F2FS_OPTION(sbi).unusable_cap_perc)
		return;

	if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
		F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
	else
		F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
					F2FS_OPTION(sbi).unusable_cap_perc;

	f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
		  F2FS_OPTION(sbi).unusable_cap,
		  F2FS_OPTION(sbi).unusable_cap_perc);
}
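
/*
 * Worked example (editor's note): with user_block_count = 2621440
 * (10 GiB of 4 KiB blocks) and checkpoint=disable:30%, the integer
 * math above yields unusable_cap = (2621440 / 100) * 30 = 786420
 * blocks; the division truncates before the multiply.
 */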
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
#ifdef CONFIG_FS_ENCRYPTION
	fi->i_crypt_info = NULL;
#endif
}

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
/*
 * Note the name of the specified quota file.
 */
static int f2fs_note_qf_name(struct fs_context *fc, int qtype,
			     struct fs_parameter *param)
{
	struct f2fs_fs_context *ctx = fc->fs_private;
	char *qname;

	if (param->size < 1) {
		f2fs_err(NULL, "Missing quota name");
		return -EINVAL;
	}
	if (strchr(param->string, '/')) {
		f2fs_err(NULL, "quotafile must be on filesystem root");
		return -EINVAL;
	}
	if (ctx->info.s_qf_names[qtype]) {
		if (strcmp(ctx->info.s_qf_names[qtype], param->string) != 0) {
			f2fs_err(NULL, "Quota file already specified");
			return -EINVAL;
		}
		return 0;
	}

	qname = kmemdup_nul(param->string, param->size, GFP_KERNEL);
	if (!qname) {
		f2fs_err(NULL, "Not enough memory for storing quotafile name");
		return -ENOMEM;
	}
	F2FS_CTX_INFO(ctx).s_qf_names[qtype] = qname;
	ctx->qname_mask |= 1 << qtype;
	return 0;
}

/*
 * Clear the name of the specified quota file.
 */
static int f2fs_unnote_qf_name(struct fs_context *fc, int qtype)
{
	struct f2fs_fs_context *ctx = fc->fs_private;

	kfree(ctx->info.s_qf_names[qtype]);
	ctx->info.s_qf_names[qtype] = NULL;
	ctx->qname_mask |= 1 << qtype;
	return 0;
}

static void f2fs_unnote_qf_name_all(struct fs_context *fc)
{
	int i;

	for (i = 0; i < MAXQUOTAS; i++)
		f2fs_unnote_qf_name(fc, i);
}
#endif

static int f2fs_parse_test_dummy_encryption(const struct fs_parameter *param,
					    struct f2fs_fs_context *ctx)
{
	int err;

	if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
		f2fs_warn(NULL, "test_dummy_encryption option not supported");
		return -EINVAL;
	}
	err = fscrypt_parse_test_dummy_encryption(param,
						  &ctx->info.dummy_enc_policy);
	if (err) {
		if (err == -EINVAL)
			f2fs_warn(NULL, "Value of option \"%s\" is unrecognized",
				  param->key);
		else if (err == -EEXIST)
			f2fs_warn(NULL, "Conflicting test_dummy_encryption options");
		else
			f2fs_warn(NULL, "Error processing option \"%s\" [%d]",
				  param->key, err);
		return -EINVAL;
	}
	return 0;
}

#ifdef CONFIG_F2FS_FS_COMPRESSION
static bool is_compress_extension_exist(struct f2fs_mount_info *info,
					const char *new_ext, bool is_ext)
{
	unsigned char (*ext)[F2FS_EXTENSION_LEN];
	int ext_cnt;
	int i;

	if (is_ext) {
		ext = info->extensions;
		ext_cnt = info->compress_ext_cnt;
	} else {
		ext = info->noextensions;
		ext_cnt = info->nocompress_ext_cnt;
	}

	for (i = 0; i < ext_cnt; i++) {
		if (!strcasecmp(new_ext, ext[i]))
			return true;
	}

	return false;
}

/*
 * 1. The same extension name must not appear in both the compress and
 *    non-compress extension lists at the same time.
 * 2. If the compress extension covers all files, the types named by the
 *    non-compress extension are treated as special cases and are not
 *    compressed.
 * 3. The non-compress extension must not cover all files.
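 *
 * For example (editor's note), mounting with compress_extension=* and
 * nocompress_extension=log enables compression for every file except
 * those ending in ".log".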
 */
static int f2fs_test_compress_extension(unsigned char (*noext)[F2FS_EXTENSION_LEN],
					int noext_cnt,
					unsigned char (*ext)[F2FS_EXTENSION_LEN],
					int ext_cnt)
{
	int index = 0, no_index = 0;

	if (!noext_cnt)
		return 0;

	for (no_index = 0; no_index < noext_cnt; no_index++) {
		if (strlen(noext[no_index]) == 0)
			continue;
		if (!strcasecmp("*", noext[no_index])) {
			f2fs_info(NULL, "Don't allow the nocompress extension to specify all files");
			return -EINVAL;
		}
		for (index = 0; index < ext_cnt; index++) {
			if (strlen(ext[index]) == 0)
				continue;
			if (!strcasecmp(ext[index], noext[no_index])) {
				f2fs_info(NULL, "Don't allow the same extension %s to appear in both compress and nocompress extension",
						ext[index]);
				return -EINVAL;
			}
		}
	}
	return 0;
}

#ifdef CONFIG_F2FS_FS_LZ4
static int f2fs_set_lz4hc_level(struct f2fs_fs_context *ctx, const char *str)
{
#ifdef CONFIG_F2FS_FS_LZ4HC
	unsigned int level;

	if (strlen(str) == 3) {
		F2FS_CTX_INFO(ctx).compress_level = 0;
		ctx->spec_mask |= F2FS_SPEC_compress_level;
		return 0;
	}

	str += 3;

	if (str[0] != ':') {
		f2fs_info(NULL, "wrong format, e.g. <alg_name>:<compr_level>");
		return -EINVAL;
	}
	if (kstrtouint(str + 1, 10, &level))
		return -EINVAL;

	if (!f2fs_is_compress_level_valid(COMPRESS_LZ4, level)) {
		f2fs_info(NULL, "invalid lz4hc compress level: %d", level);
		return -EINVAL;
	}

	F2FS_CTX_INFO(ctx).compress_level = level;
	ctx->spec_mask |= F2FS_SPEC_compress_level;
	return 0;
#else
	if (strlen(str) == 3) {
		F2FS_CTX_INFO(ctx).compress_level = 0;
		ctx->spec_mask |= F2FS_SPEC_compress_level;
		return 0;
	}
	f2fs_info(NULL, "kernel doesn't support lz4hc compression");
	return -EINVAL;
#endif
}
#endif
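
/*
 * Editor's note, worked example: both level setters parse an optional
 * ":<level>" suffix on the algorithm name.  "compress_algorithm=lz4:9"
 * reaches f2fs_set_lz4hc_level() with str = "lz4:9"; strlen(str) != 3,
 * so str is advanced past "lz4", the ':' is consumed, and kstrtouint()
 * yields level = 9.  A bare "lz4" (length 3) selects level 0.
 */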
<alg_name>:<compr_level>"); 766 return -EINVAL; 767 } 768 if (kstrtoint(str + 1, 10, &level)) 769 return -EINVAL; 770 771 /* f2fs does not support negative compress level now */ 772 if (level < 0) { 773 f2fs_info(NULL, "do not support negative compress level: %d", level); 774 return -ERANGE; 775 } 776 777 if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD, level)) { 778 f2fs_info(NULL, "invalid zstd compress level: %d", level); 779 return -EINVAL; 780 } 781 782 F2FS_CTX_INFO(ctx).compress_level = level; 783 ctx->spec_mask |= F2FS_SPEC_compress_level; 784 return 0; 785 } 786 #endif 787 #endif 788 789 static int f2fs_parse_param(struct fs_context *fc, struct fs_parameter *param) 790 { 791 struct f2fs_fs_context *ctx = fc->fs_private; 792 #ifdef CONFIG_F2FS_FS_COMPRESSION 793 unsigned char (*ext)[F2FS_EXTENSION_LEN]; 794 unsigned char (*noext)[F2FS_EXTENSION_LEN]; 795 int ext_cnt, noext_cnt; 796 char *name; 797 #endif 798 substring_t args[MAX_OPT_ARGS]; 799 struct fs_parse_result result; 800 int token, ret, arg; 801 802 token = fs_parse(fc, f2fs_param_specs, param, &result); 803 if (token < 0) 804 return token; 805 806 switch (token) { 807 case Opt_gc_background: 808 F2FS_CTX_INFO(ctx).bggc_mode = result.uint_32; 809 ctx->spec_mask |= F2FS_SPEC_background_gc; 810 break; 811 case Opt_disable_roll_forward: 812 ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_ROLL_FORWARD); 813 break; 814 case Opt_norecovery: 815 /* requires ro mount, checked in f2fs_validate_options */ 816 ctx_set_opt(ctx, F2FS_MOUNT_NORECOVERY); 817 break; 818 case Opt_discard: 819 if (result.negated) 820 ctx_clear_opt(ctx, F2FS_MOUNT_DISCARD); 821 else 822 ctx_set_opt(ctx, F2FS_MOUNT_DISCARD); 823 break; 824 case Opt_noheap: 825 case Opt_heap: 826 f2fs_warn(NULL, "heap/no_heap options were deprecated"); 827 break; 828 #ifdef CONFIG_F2FS_FS_XATTR 829 case Opt_user_xattr: 830 if (result.negated) 831 ctx_clear_opt(ctx, F2FS_MOUNT_XATTR_USER); 832 else 833 ctx_set_opt(ctx, F2FS_MOUNT_XATTR_USER); 834 break; 835 case Opt_inline_xattr: 836 if (result.negated) 837 ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_XATTR); 838 else 839 ctx_set_opt(ctx, F2FS_MOUNT_INLINE_XATTR); 840 break; 841 case Opt_inline_xattr_size: 842 if (result.int_32 < MIN_INLINE_XATTR_SIZE || 843 result.int_32 > MAX_INLINE_XATTR_SIZE) { 844 f2fs_err(NULL, "inline xattr size is out of range: %u ~ %u", 845 (u32)MIN_INLINE_XATTR_SIZE, (u32)MAX_INLINE_XATTR_SIZE); 846 return -EINVAL; 847 } 848 ctx_set_opt(ctx, F2FS_MOUNT_INLINE_XATTR_SIZE); 849 F2FS_CTX_INFO(ctx).inline_xattr_size = result.int_32; 850 ctx->spec_mask |= F2FS_SPEC_inline_xattr_size; 851 break; 852 #else 853 case Opt_user_xattr: 854 case Opt_inline_xattr: 855 case Opt_inline_xattr_size: 856 f2fs_info(NULL, "%s options not supported", param->key); 857 break; 858 #endif 859 #ifdef CONFIG_F2FS_FS_POSIX_ACL 860 case Opt_acl: 861 if (result.negated) 862 ctx_clear_opt(ctx, F2FS_MOUNT_POSIX_ACL); 863 else 864 ctx_set_opt(ctx, F2FS_MOUNT_POSIX_ACL); 865 break; 866 #else 867 case Opt_acl: 868 f2fs_info(NULL, "%s options not supported", param->key); 869 break; 870 #endif 871 case Opt_active_logs: 872 if (result.int_32 != 2 && result.int_32 != 4 && 873 result.int_32 != NR_CURSEG_PERSIST_TYPE) 874 return -EINVAL; 875 ctx->spec_mask |= F2FS_SPEC_active_logs; 876 F2FS_CTX_INFO(ctx).active_logs = result.int_32; 877 break; 878 case Opt_disable_ext_identify: 879 ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_EXT_IDENTIFY); 880 break; 881 case Opt_inline_data: 882 if (result.negated) 883 ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_DATA); 884 else 885 
			ctx_set_opt(ctx, F2FS_MOUNT_INLINE_DATA);
		break;
	case Opt_inline_dentry:
		if (result.negated)
			ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_DENTRY);
		else
			ctx_set_opt(ctx, F2FS_MOUNT_INLINE_DENTRY);
		break;
	case Opt_flush_merge:
		if (result.negated)
			ctx_clear_opt(ctx, F2FS_MOUNT_FLUSH_MERGE);
		else
			ctx_set_opt(ctx, F2FS_MOUNT_FLUSH_MERGE);
		break;
	case Opt_barrier:
		if (result.negated)
			ctx_set_opt(ctx, F2FS_MOUNT_NOBARRIER);
		else
			ctx_clear_opt(ctx, F2FS_MOUNT_NOBARRIER);
		break;
	case Opt_fastboot:
		ctx_set_opt(ctx, F2FS_MOUNT_FASTBOOT);
		break;
	case Opt_extent_cache:
		if (result.negated)
			ctx_clear_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE);
		else
			ctx_set_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE);
		break;
	case Opt_data_flush:
		ctx_set_opt(ctx, F2FS_MOUNT_DATA_FLUSH);
		break;
	case Opt_reserve_root:
		ctx_set_opt(ctx, F2FS_MOUNT_RESERVE_ROOT);
		F2FS_CTX_INFO(ctx).root_reserved_blocks = result.uint_32;
		ctx->spec_mask |= F2FS_SPEC_reserve_root;
		break;
	case Opt_reserve_node:
		ctx_set_opt(ctx, F2FS_MOUNT_RESERVE_NODE);
		F2FS_CTX_INFO(ctx).root_reserved_nodes = result.uint_32;
		ctx->spec_mask |= F2FS_SPEC_reserve_node;
		break;
	case Opt_resuid:
		F2FS_CTX_INFO(ctx).s_resuid = result.uid;
		ctx->spec_mask |= F2FS_SPEC_resuid;
		break;
	case Opt_resgid:
		F2FS_CTX_INFO(ctx).s_resgid = result.gid;
		ctx->spec_mask |= F2FS_SPEC_resgid;
		break;
	case Opt_mode:
		F2FS_CTX_INFO(ctx).fs_mode = result.uint_32;
		ctx->spec_mask |= F2FS_SPEC_mode;
		break;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	case Opt_fault_injection:
		F2FS_CTX_INFO(ctx).fault_info.inject_rate = result.int_32;
		ctx->spec_mask |= F2FS_SPEC_fault_injection;
		ctx_set_opt(ctx, F2FS_MOUNT_FAULT_INJECTION);
		break;

	case Opt_fault_type:
		if (result.uint_32 > BIT(FAULT_MAX))
			return -EINVAL;
		F2FS_CTX_INFO(ctx).fault_info.inject_type = result.uint_32;
		ctx->spec_mask |= F2FS_SPEC_fault_type;
		ctx_set_opt(ctx, F2FS_MOUNT_FAULT_INJECTION);
		break;
#else
	case Opt_fault_injection:
	case Opt_fault_type:
		f2fs_info(NULL, "%s options not supported", param->key);
		break;
#endif
	case Opt_lazytime:
		if (result.negated)
			ctx_clear_opt(ctx, F2FS_MOUNT_LAZYTIME);
		else
			ctx_set_opt(ctx, F2FS_MOUNT_LAZYTIME);
		break;
#ifdef CONFIG_QUOTA
	case Opt_quota:
		if (result.negated) {
			ctx_clear_opt(ctx, F2FS_MOUNT_QUOTA);
			ctx_clear_opt(ctx, F2FS_MOUNT_USRQUOTA);
			ctx_clear_opt(ctx, F2FS_MOUNT_GRPQUOTA);
			ctx_clear_opt(ctx, F2FS_MOUNT_PRJQUOTA);
		} else
			ctx_set_opt(ctx, F2FS_MOUNT_USRQUOTA);
		break;
	case Opt_usrquota:
		ctx_set_opt(ctx, F2FS_MOUNT_USRQUOTA);
		break;
	case Opt_grpquota:
		ctx_set_opt(ctx, F2FS_MOUNT_GRPQUOTA);
		break;
	case Opt_prjquota:
		ctx_set_opt(ctx, F2FS_MOUNT_PRJQUOTA);
		break;
	case Opt_usrjquota:
		if (param->type == fs_value_is_string && *param->string)
			ret = f2fs_note_qf_name(fc, USRQUOTA, param);
		else
			ret = f2fs_unnote_qf_name(fc, USRQUOTA);
		if (ret)
			return ret;
		break;
	case Opt_grpjquota:
		if (param->type == fs_value_is_string && *param->string)
			ret = f2fs_note_qf_name(fc, GRPQUOTA, param);
		else
			ret = f2fs_unnote_qf_name(fc, GRPQUOTA);
		if (ret)
			return ret;
		break;
	case Opt_prjjquota:
		if (param->type == fs_value_is_string && *param->string)
			ret = f2fs_note_qf_name(fc, PRJQUOTA, param);
		else
			ret = f2fs_unnote_qf_name(fc, PRJQUOTA);
		if (ret)
			return ret;
		break;
	case Opt_jqfmt:
		F2FS_CTX_INFO(ctx).s_jquota_fmt = result.int_32;
		ctx->spec_mask |= F2FS_SPEC_jqfmt;
		break;
#else
	case Opt_quota:
	case Opt_usrquota:
	case Opt_grpquota:
	case Opt_prjquota:
	case Opt_usrjquota:
	case Opt_grpjquota:
	case Opt_prjjquota:
		f2fs_info(NULL, "quota operations not supported");
		break;
#endif
	case Opt_alloc:
		F2FS_CTX_INFO(ctx).alloc_mode = result.uint_32;
		ctx->spec_mask |= F2FS_SPEC_alloc_mode;
		break;
	case Opt_fsync:
		F2FS_CTX_INFO(ctx).fsync_mode = result.uint_32;
		ctx->spec_mask |= F2FS_SPEC_fsync_mode;
		break;
	case Opt_test_dummy_encryption:
		ret = f2fs_parse_test_dummy_encryption(param, ctx);
		if (ret)
			return ret;
		break;
	case Opt_inlinecrypt:
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
		ctx_set_opt(ctx, F2FS_MOUNT_INLINECRYPT);
#else
		f2fs_info(NULL, "inline encryption not supported");
#endif
		break;
	case Opt_checkpoint:
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].from = args[0].to = NULL;
		arg = 0;

		/* revert to match_table for checkpoint= options */
		token = match_token(param->string, f2fs_checkpoint_tokens, args);
		switch (token) {
		case Opt_checkpoint_disable_cap_perc:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg < 0 || arg > 100)
				return -EINVAL;
			F2FS_CTX_INFO(ctx).unusable_cap_perc = arg;
			ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap_perc;
			ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable_cap:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			F2FS_CTX_INFO(ctx).unusable_cap = arg;
			ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap;
			ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable:
			ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_enable:
			F2FS_CTX_INFO(ctx).unusable_cap_perc = 0;
			ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap_perc;
			F2FS_CTX_INFO(ctx).unusable_cap = 0;
			ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap;
			ctx_clear_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
			break;
		default:
			return -EINVAL;
		}
		break;
	case Opt_checkpoint_merge:
		if (result.negated)
			ctx_clear_opt(ctx, F2FS_MOUNT_MERGE_CHECKPOINT);
		else
			ctx_set_opt(ctx, F2FS_MOUNT_MERGE_CHECKPOINT);
		break;
#ifdef CONFIG_F2FS_FS_COMPRESSION
	case Opt_compress_algorithm:
		name = param->string;
		if (!strcmp(name, "lzo")) {
#ifdef CONFIG_F2FS_FS_LZO
			F2FS_CTX_INFO(ctx).compress_level = 0;
			F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZO;
			ctx->spec_mask |= F2FS_SPEC_compress_level;
			ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
#else
			f2fs_info(NULL, "kernel doesn't support lzo compression");
#endif
		} else if (!strncmp(name, "lz4", 3)) {
#ifdef CONFIG_F2FS_FS_LZ4
			ret = f2fs_set_lz4hc_level(ctx, name);
			if (ret)
				return -EINVAL;
			F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZ4;
			ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
#else
			f2fs_info(NULL, "kernel doesn't support lz4 compression");
#endif
		} else if (!strncmp(name, "zstd", 4)) {
(!strncmp(name, "zstd", 4)) { 1113 #ifdef CONFIG_F2FS_FS_ZSTD 1114 ret = f2fs_set_zstd_level(ctx, name); 1115 if (ret) 1116 return -EINVAL; 1117 F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_ZSTD; 1118 ctx->spec_mask |= F2FS_SPEC_compress_algorithm; 1119 #else 1120 f2fs_info(NULL, "kernel doesn't support zstd compression"); 1121 #endif 1122 } else if (!strcmp(name, "lzo-rle")) { 1123 #ifdef CONFIG_F2FS_FS_LZORLE 1124 F2FS_CTX_INFO(ctx).compress_level = 0; 1125 F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZORLE; 1126 ctx->spec_mask |= F2FS_SPEC_compress_level; 1127 ctx->spec_mask |= F2FS_SPEC_compress_algorithm; 1128 #else 1129 f2fs_info(NULL, "kernel doesn't support lzorle compression"); 1130 #endif 1131 } else 1132 return -EINVAL; 1133 break; 1134 case Opt_compress_log_size: 1135 if (result.uint_32 < MIN_COMPRESS_LOG_SIZE || 1136 result.uint_32 > MAX_COMPRESS_LOG_SIZE) { 1137 f2fs_err(NULL, 1138 "Compress cluster log size is out of range"); 1139 return -EINVAL; 1140 } 1141 F2FS_CTX_INFO(ctx).compress_log_size = result.uint_32; 1142 ctx->spec_mask |= F2FS_SPEC_compress_log_size; 1143 break; 1144 case Opt_compress_extension: 1145 name = param->string; 1146 ext = F2FS_CTX_INFO(ctx).extensions; 1147 ext_cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt; 1148 1149 if (strlen(name) >= F2FS_EXTENSION_LEN || 1150 ext_cnt >= COMPRESS_EXT_NUM) { 1151 f2fs_err(NULL, "invalid extension length/number"); 1152 return -EINVAL; 1153 } 1154 1155 if (is_compress_extension_exist(&ctx->info, name, true)) 1156 break; 1157 1158 ret = strscpy(ext[ext_cnt], name, F2FS_EXTENSION_LEN); 1159 if (ret < 0) 1160 return ret; 1161 F2FS_CTX_INFO(ctx).compress_ext_cnt++; 1162 ctx->spec_mask |= F2FS_SPEC_compress_extension; 1163 break; 1164 case Opt_nocompress_extension: 1165 name = param->string; 1166 noext = F2FS_CTX_INFO(ctx).noextensions; 1167 noext_cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt; 1168 1169 if (strlen(name) >= F2FS_EXTENSION_LEN || 1170 noext_cnt >= COMPRESS_EXT_NUM) { 1171 f2fs_err(NULL, "invalid extension length/number"); 1172 return -EINVAL; 1173 } 1174 1175 if (is_compress_extension_exist(&ctx->info, name, false)) 1176 break; 1177 1178 ret = strscpy(noext[noext_cnt], name, F2FS_EXTENSION_LEN); 1179 if (ret < 0) 1180 return ret; 1181 F2FS_CTX_INFO(ctx).nocompress_ext_cnt++; 1182 ctx->spec_mask |= F2FS_SPEC_nocompress_extension; 1183 break; 1184 case Opt_compress_chksum: 1185 F2FS_CTX_INFO(ctx).compress_chksum = true; 1186 ctx->spec_mask |= F2FS_SPEC_compress_chksum; 1187 break; 1188 case Opt_compress_mode: 1189 F2FS_CTX_INFO(ctx).compress_mode = result.uint_32; 1190 ctx->spec_mask |= F2FS_SPEC_compress_mode; 1191 break; 1192 case Opt_compress_cache: 1193 ctx_set_opt(ctx, F2FS_MOUNT_COMPRESS_CACHE); 1194 break; 1195 #else 1196 case Opt_compress_algorithm: 1197 case Opt_compress_log_size: 1198 case Opt_compress_extension: 1199 case Opt_nocompress_extension: 1200 case Opt_compress_chksum: 1201 case Opt_compress_mode: 1202 case Opt_compress_cache: 1203 f2fs_info(NULL, "compression options not supported"); 1204 break; 1205 #endif 1206 case Opt_atgc: 1207 ctx_set_opt(ctx, F2FS_MOUNT_ATGC); 1208 break; 1209 case Opt_gc_merge: 1210 if (result.negated) 1211 ctx_clear_opt(ctx, F2FS_MOUNT_GC_MERGE); 1212 else 1213 ctx_set_opt(ctx, F2FS_MOUNT_GC_MERGE); 1214 break; 1215 case Opt_discard_unit: 1216 F2FS_CTX_INFO(ctx).discard_unit = result.uint_32; 1217 ctx->spec_mask |= F2FS_SPEC_discard_unit; 1218 break; 1219 case Opt_memory_mode: 1220 F2FS_CTX_INFO(ctx).memory_mode = result.uint_32; 1221 ctx->spec_mask |= 
		break;
	case Opt_age_extent_cache:
		ctx_set_opt(ctx, F2FS_MOUNT_AGE_EXTENT_CACHE);
		break;
	case Opt_errors:
		F2FS_CTX_INFO(ctx).errors = result.uint_32;
		ctx->spec_mask |= F2FS_SPEC_errors;
		break;
	case Opt_nat_bits:
		ctx_set_opt(ctx, F2FS_MOUNT_NAT_BITS);
		break;
	case Opt_lookup_mode:
		F2FS_CTX_INFO(ctx).lookup_mode = result.uint_32;
		ctx->spec_mask |= F2FS_SPEC_lookup_mode;
		break;
	}
	return 0;
}
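
/*
 * Editor's note, worked example: mounting with "checkpoint=disable:10%"
 * arrives in f2fs_parse_param() as param->key "checkpoint" and
 * param->string "disable:10%".  match_token() against
 * f2fs_checkpoint_tokens picks Opt_checkpoint_disable_cap_perc,
 * match_int() parses arg = 10, and the stored unusable_cap_perc is
 * later applied against user_block_count in adjust_unusable_cap_perc().
 */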
specified"); 1333 return -EINVAL; 1334 } 1335 } 1336 return 0; 1337 1338 err_jquota_change: 1339 f2fs_err(sbi, "Cannot change journaled quota options when quota turned on"); 1340 return -EINVAL; 1341 err_jquota_specified: 1342 f2fs_err(sbi, "%s quota file already specified", 1343 QTYPE2NAME(i)); 1344 return -EINVAL; 1345 1346 #else 1347 if (f2fs_readonly(sbi->sb)) 1348 return 0; 1349 if (f2fs_sb_has_quota_ino(sbi)) { 1350 f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA"); 1351 return -EINVAL; 1352 } 1353 if (f2fs_sb_has_project_quota(sbi)) { 1354 f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA"); 1355 return -EINVAL; 1356 } 1357 1358 return 0; 1359 #endif 1360 } 1361 1362 static int f2fs_check_test_dummy_encryption(struct fs_context *fc, 1363 struct super_block *sb) 1364 { 1365 struct f2fs_fs_context *ctx = fc->fs_private; 1366 struct f2fs_sb_info *sbi = F2FS_SB(sb); 1367 1368 if (!fscrypt_is_dummy_policy_set(&F2FS_CTX_INFO(ctx).dummy_enc_policy)) 1369 return 0; 1370 1371 if (!f2fs_sb_has_encrypt(sbi)) { 1372 f2fs_err(sbi, "Encrypt feature is off"); 1373 return -EINVAL; 1374 } 1375 1376 /* 1377 * This mount option is just for testing, and it's not worthwhile to 1378 * implement the extra complexity (e.g. RCU protection) that would be 1379 * needed to allow it to be set or changed during remount. We do allow 1380 * it to be specified during remount, but only if there is no change. 1381 */ 1382 if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { 1383 if (fscrypt_dummy_policies_equal(&F2FS_OPTION(sbi).dummy_enc_policy, 1384 &F2FS_CTX_INFO(ctx).dummy_enc_policy)) 1385 return 0; 1386 f2fs_warn(sbi, "Can't set or change test_dummy_encryption on remount"); 1387 return -EINVAL; 1388 } 1389 return 0; 1390 } 1391 1392 static inline bool test_compression_spec(unsigned int mask) 1393 { 1394 return mask & (F2FS_SPEC_compress_algorithm 1395 | F2FS_SPEC_compress_log_size 1396 | F2FS_SPEC_compress_extension 1397 | F2FS_SPEC_nocompress_extension 1398 | F2FS_SPEC_compress_chksum 1399 | F2FS_SPEC_compress_mode); 1400 } 1401 1402 static inline void clear_compression_spec(struct f2fs_fs_context *ctx) 1403 { 1404 ctx->spec_mask &= ~(F2FS_SPEC_compress_algorithm 1405 | F2FS_SPEC_compress_log_size 1406 | F2FS_SPEC_compress_extension 1407 | F2FS_SPEC_nocompress_extension 1408 | F2FS_SPEC_compress_chksum 1409 | F2FS_SPEC_compress_mode); 1410 } 1411 1412 static int f2fs_check_compression(struct fs_context *fc, 1413 struct super_block *sb) 1414 { 1415 #ifdef CONFIG_F2FS_FS_COMPRESSION 1416 struct f2fs_fs_context *ctx = fc->fs_private; 1417 struct f2fs_sb_info *sbi = F2FS_SB(sb); 1418 int i, cnt; 1419 1420 if (!f2fs_sb_has_compression(sbi)) { 1421 if (test_compression_spec(ctx->spec_mask) || 1422 ctx_test_opt(ctx, F2FS_MOUNT_COMPRESS_CACHE)) 1423 f2fs_info(sbi, "Image doesn't support compression"); 1424 clear_compression_spec(ctx); 1425 ctx->opt_mask &= ~BIT(F2FS_MOUNT_COMPRESS_CACHE); 1426 return 0; 1427 } 1428 if (ctx->spec_mask & F2FS_SPEC_compress_extension) { 1429 cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt; 1430 for (i = 0; i < F2FS_CTX_INFO(ctx).compress_ext_cnt; i++) { 1431 if (is_compress_extension_exist(&F2FS_OPTION(sbi), 1432 F2FS_CTX_INFO(ctx).extensions[i], true)) { 1433 F2FS_CTX_INFO(ctx).extensions[i][0] = '\0'; 1434 cnt--; 1435 } 1436 } 1437 if (F2FS_OPTION(sbi).compress_ext_cnt + cnt > COMPRESS_EXT_NUM) { 1438 f2fs_err(sbi, "invalid extension length/number"); 1439 return -EINVAL; 1440 } 1441 } 1442 if (ctx->spec_mask 
& F2FS_SPEC_nocompress_extension) { 1443 cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt; 1444 for (i = 0; i < F2FS_CTX_INFO(ctx).nocompress_ext_cnt; i++) { 1445 if (is_compress_extension_exist(&F2FS_OPTION(sbi), 1446 F2FS_CTX_INFO(ctx).noextensions[i], false)) { 1447 F2FS_CTX_INFO(ctx).noextensions[i][0] = '\0'; 1448 cnt--; 1449 } 1450 } 1451 if (F2FS_OPTION(sbi).nocompress_ext_cnt + cnt > COMPRESS_EXT_NUM) { 1452 f2fs_err(sbi, "invalid noextension length/number"); 1453 return -EINVAL; 1454 } 1455 } 1456 1457 if (f2fs_test_compress_extension(F2FS_CTX_INFO(ctx).noextensions, 1458 F2FS_CTX_INFO(ctx).nocompress_ext_cnt, 1459 F2FS_CTX_INFO(ctx).extensions, 1460 F2FS_CTX_INFO(ctx).compress_ext_cnt)) { 1461 f2fs_err(sbi, "new noextensions conflicts with new extensions"); 1462 return -EINVAL; 1463 } 1464 if (f2fs_test_compress_extension(F2FS_CTX_INFO(ctx).noextensions, 1465 F2FS_CTX_INFO(ctx).nocompress_ext_cnt, 1466 F2FS_OPTION(sbi).extensions, 1467 F2FS_OPTION(sbi).compress_ext_cnt)) { 1468 f2fs_err(sbi, "new noextensions conflicts with old extensions"); 1469 return -EINVAL; 1470 } 1471 if (f2fs_test_compress_extension(F2FS_OPTION(sbi).noextensions, 1472 F2FS_OPTION(sbi).nocompress_ext_cnt, 1473 F2FS_CTX_INFO(ctx).extensions, 1474 F2FS_CTX_INFO(ctx).compress_ext_cnt)) { 1475 f2fs_err(sbi, "new extensions conflicts with old noextensions"); 1476 return -EINVAL; 1477 } 1478 #endif 1479 return 0; 1480 } 1481 1482 static int f2fs_check_opt_consistency(struct fs_context *fc, 1483 struct super_block *sb) 1484 { 1485 struct f2fs_fs_context *ctx = fc->fs_private; 1486 struct f2fs_sb_info *sbi = F2FS_SB(sb); 1487 int err; 1488 1489 if (ctx_test_opt(ctx, F2FS_MOUNT_NORECOVERY) && !f2fs_readonly(sb)) 1490 return -EINVAL; 1491 1492 if (f2fs_hw_should_discard(sbi) && 1493 (ctx->opt_mask & BIT(F2FS_MOUNT_DISCARD)) && 1494 !ctx_test_opt(ctx, F2FS_MOUNT_DISCARD)) { 1495 f2fs_warn(sbi, "discard is required for zoned block devices"); 1496 return -EINVAL; 1497 } 1498 1499 if (!f2fs_hw_support_discard(sbi) && 1500 (ctx->opt_mask & BIT(F2FS_MOUNT_DISCARD)) && 1501 ctx_test_opt(ctx, F2FS_MOUNT_DISCARD)) { 1502 f2fs_warn(sbi, "device does not support discard"); 1503 ctx_clear_opt(ctx, F2FS_MOUNT_DISCARD); 1504 ctx->opt_mask &= ~BIT(F2FS_MOUNT_DISCARD); 1505 } 1506 1507 if (f2fs_sb_has_device_alias(sbi) && 1508 (ctx->opt_mask & BIT(F2FS_MOUNT_READ_EXTENT_CACHE)) && 1509 !ctx_test_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE)) { 1510 f2fs_err(sbi, "device aliasing requires extent cache"); 1511 return -EINVAL; 1512 } 1513 1514 if (test_opt(sbi, RESERVE_ROOT) && 1515 (ctx->opt_mask & BIT(F2FS_MOUNT_RESERVE_ROOT)) && 1516 ctx_test_opt(ctx, F2FS_MOUNT_RESERVE_ROOT)) { 1517 f2fs_info(sbi, "Preserve previous reserve_root=%u", 1518 F2FS_OPTION(sbi).root_reserved_blocks); 1519 ctx_clear_opt(ctx, F2FS_MOUNT_RESERVE_ROOT); 1520 ctx->opt_mask &= ~BIT(F2FS_MOUNT_RESERVE_ROOT); 1521 ctx->spec_mask &= ~F2FS_SPEC_reserve_root; 1522 } 1523 if (test_opt(sbi, RESERVE_NODE) && 1524 (ctx->opt_mask & BIT(F2FS_MOUNT_RESERVE_NODE)) && 1525 ctx_test_opt(ctx, F2FS_MOUNT_RESERVE_NODE)) { 1526 f2fs_info(sbi, "Preserve previous reserve_node=%u", 1527 F2FS_OPTION(sbi).root_reserved_nodes); 1528 ctx_clear_opt(ctx, F2FS_MOUNT_RESERVE_NODE); 1529 ctx->opt_mask &= ~BIT(F2FS_MOUNT_RESERVE_NODE); 1530 ctx->spec_mask &= ~F2FS_SPEC_reserve_node; 1531 } 1532 1533 err = f2fs_check_test_dummy_encryption(fc, sb); 1534 if (err) 1535 return err; 1536 1537 err = f2fs_check_compression(fc, sb); 1538 if (err) 1539 return err; 1540 1541 err = f2fs_check_quota_consistency(fc, 
	if (err)
		return err;

	if (!IS_ENABLED(CONFIG_UNICODE) && f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi,
			"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}

	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
	if (f2fs_sb_has_blkzoned(sbi)) {
		if (F2FS_CTX_INFO(ctx).bggc_mode == BGGC_MODE_OFF) {
			f2fs_warn(sbi, "zoned devices need bggc");
			return -EINVAL;
		}
#ifdef CONFIG_BLK_DEV_ZONED
		if ((ctx->spec_mask & F2FS_SPEC_discard_unit) &&
		    F2FS_CTX_INFO(ctx).discard_unit != DISCARD_UNIT_SECTION) {
			f2fs_info(sbi, "Zoned block device doesn't need small discard, set discard_unit=section by default");
			F2FS_CTX_INFO(ctx).discard_unit = DISCARD_UNIT_SECTION;
		}

		if ((ctx->spec_mask & F2FS_SPEC_mode) &&
		    F2FS_CTX_INFO(ctx).fs_mode != FS_MODE_LFS) {
			f2fs_info(sbi, "Only lfs mode is allowed with zoned block device feature");
			return -EINVAL;
		}
#else
		f2fs_err(sbi, "Zoned block device support is not enabled");
		return -EINVAL;
#endif
	}

	if (ctx_test_opt(ctx, F2FS_MOUNT_INLINE_XATTR_SIZE)) {
		if (!f2fs_sb_has_extra_attr(sbi) ||
			!f2fs_sb_has_flexible_inline_xattr(sbi)) {
			f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
			return -EINVAL;
		}
		if (!ctx_test_opt(ctx, F2FS_MOUNT_INLINE_XATTR) && !test_opt(sbi, INLINE_XATTR)) {
			f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
			return -EINVAL;
		}
	}

	if (ctx_test_opt(ctx, F2FS_MOUNT_ATGC) &&
	    F2FS_CTX_INFO(ctx).fs_mode == FS_MODE_LFS) {
		f2fs_err(sbi, "LFS is not compatible with ATGC");
		return -EINVAL;
	}

	if (f2fs_is_readonly(sbi) && ctx_test_opt(ctx, F2FS_MOUNT_FLUSH_MERGE)) {
		f2fs_err(sbi, "FLUSH_MERGE not compatible with readonly mode");
		return -EINVAL;
	}

	if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_err(sbi, "Allow to mount readonly mode only");
		return -EROFS;
	}
	return 0;
}

static void f2fs_apply_quota_options(struct fs_context *fc,
				     struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_fs_context *ctx = fc->fs_private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	bool quota_feature = f2fs_sb_has_quota_ino(sbi);
	char *qname;
	int i;

	if (quota_feature)
		return;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (!(ctx->qname_mask & (1 << i)))
			continue;

		qname = F2FS_CTX_INFO(ctx).s_qf_names[i];
		if (qname) {
			qname = kstrdup(F2FS_CTX_INFO(ctx).s_qf_names[i],
					GFP_KERNEL | __GFP_NOFAIL);
			set_opt(sbi, QUOTA);
		}
		F2FS_OPTION(sbi).s_qf_names[i] = qname;
	}

	if (ctx->spec_mask & F2FS_SPEC_jqfmt)
		F2FS_OPTION(sbi).s_jquota_fmt = F2FS_CTX_INFO(ctx).s_jquota_fmt;

	if (quota_feature && F2FS_OPTION(sbi).s_jquota_fmt) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
		F2FS_OPTION(sbi).s_jquota_fmt = 0;
	}
#endif
}

static void f2fs_apply_test_dummy_encryption(struct fs_context *fc,
					     struct super_block *sb)
{
	struct f2fs_fs_context *ctx = fc->fs_private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (!fscrypt_is_dummy_policy_set(&F2FS_CTX_INFO(ctx).dummy_enc_policy) ||
		/* if already set, it was already verified to be the same */
		fscrypt_is_dummy_policy_set(&F2FS_OPTION(sbi).dummy_enc_policy))
		return;
	swap(F2FS_OPTION(sbi).dummy_enc_policy, F2FS_CTX_INFO(ctx).dummy_enc_policy);
	f2fs_warn(sbi, "Test dummy encryption mode enabled");
}

static void f2fs_apply_compression(struct fs_context *fc,
				   struct super_block *sb)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct f2fs_fs_context *ctx = fc->fs_private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	unsigned char (*ctx_ext)[F2FS_EXTENSION_LEN];
	unsigned char (*sbi_ext)[F2FS_EXTENSION_LEN];
	int ctx_cnt, sbi_cnt, i;

	if (ctx->spec_mask & F2FS_SPEC_compress_level)
		F2FS_OPTION(sbi).compress_level =
					F2FS_CTX_INFO(ctx).compress_level;
	if (ctx->spec_mask & F2FS_SPEC_compress_algorithm)
		F2FS_OPTION(sbi).compress_algorithm =
					F2FS_CTX_INFO(ctx).compress_algorithm;
	if (ctx->spec_mask & F2FS_SPEC_compress_log_size)
		F2FS_OPTION(sbi).compress_log_size =
					F2FS_CTX_INFO(ctx).compress_log_size;
	if (ctx->spec_mask & F2FS_SPEC_compress_chksum)
		F2FS_OPTION(sbi).compress_chksum =
					F2FS_CTX_INFO(ctx).compress_chksum;
	if (ctx->spec_mask & F2FS_SPEC_compress_mode)
		F2FS_OPTION(sbi).compress_mode =
					F2FS_CTX_INFO(ctx).compress_mode;
	if (ctx->spec_mask & F2FS_SPEC_compress_extension) {
		ctx_ext = F2FS_CTX_INFO(ctx).extensions;
		ctx_cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt;
		sbi_ext = F2FS_OPTION(sbi).extensions;
		sbi_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
		for (i = 0; i < ctx_cnt; i++) {
			if (strlen(ctx_ext[i]) == 0)
				continue;
			strscpy(sbi_ext[sbi_cnt], ctx_ext[i]);
			sbi_cnt++;
		}
		F2FS_OPTION(sbi).compress_ext_cnt = sbi_cnt;
	}
	if (ctx->spec_mask & F2FS_SPEC_nocompress_extension) {
		ctx_ext = F2FS_CTX_INFO(ctx).noextensions;
		ctx_cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt;
		sbi_ext = F2FS_OPTION(sbi).noextensions;
		sbi_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
		for (i = 0; i < ctx_cnt; i++) {
			if (strlen(ctx_ext[i]) == 0)
				continue;
			strscpy(sbi_ext[sbi_cnt], ctx_ext[i]);
			sbi_cnt++;
		}
		F2FS_OPTION(sbi).nocompress_ext_cnt = sbi_cnt;
	}
#endif
}

static void f2fs_apply_options(struct fs_context *fc, struct super_block *sb)
{
	struct f2fs_fs_context *ctx = fc->fs_private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	F2FS_OPTION(sbi).opt &= ~ctx->opt_mask;
	F2FS_OPTION(sbi).opt |= F2FS_CTX_INFO(ctx).opt;

	if (ctx->spec_mask & F2FS_SPEC_background_gc)
		F2FS_OPTION(sbi).bggc_mode = F2FS_CTX_INFO(ctx).bggc_mode;
	if (ctx->spec_mask & F2FS_SPEC_inline_xattr_size)
		F2FS_OPTION(sbi).inline_xattr_size =
					F2FS_CTX_INFO(ctx).inline_xattr_size;
	if (ctx->spec_mask & F2FS_SPEC_active_logs)
		F2FS_OPTION(sbi).active_logs = F2FS_CTX_INFO(ctx).active_logs;
	if (ctx->spec_mask & F2FS_SPEC_reserve_root)
		F2FS_OPTION(sbi).root_reserved_blocks =
					F2FS_CTX_INFO(ctx).root_reserved_blocks;
	if (ctx->spec_mask & F2FS_SPEC_reserve_node)
		F2FS_OPTION(sbi).root_reserved_nodes =
					F2FS_CTX_INFO(ctx).root_reserved_nodes;
	if (ctx->spec_mask & F2FS_SPEC_resgid)
		F2FS_OPTION(sbi).s_resgid = F2FS_CTX_INFO(ctx).s_resgid;
	if (ctx->spec_mask & F2FS_SPEC_resuid)
		F2FS_OPTION(sbi).s_resuid = F2FS_CTX_INFO(ctx).s_resuid;
	if (ctx->spec_mask & F2FS_SPEC_mode)
		F2FS_OPTION(sbi).fs_mode = F2FS_CTX_INFO(ctx).fs_mode;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (ctx->spec_mask & F2FS_SPEC_fault_injection)
		(void)f2fs_build_fault_attr(sbi,
			F2FS_CTX_INFO(ctx).fault_info.inject_rate, 0, FAULT_RATE);
	if (ctx->spec_mask & F2FS_SPEC_fault_type)
		(void)f2fs_build_fault_attr(sbi, 0,
			F2FS_CTX_INFO(ctx).fault_info.inject_type, FAULT_TYPE);
#endif
	if (ctx->spec_mask & F2FS_SPEC_alloc_mode)
		F2FS_OPTION(sbi).alloc_mode = F2FS_CTX_INFO(ctx).alloc_mode;
	if (ctx->spec_mask & F2FS_SPEC_fsync_mode)
		F2FS_OPTION(sbi).fsync_mode = F2FS_CTX_INFO(ctx).fsync_mode;
	if (ctx->spec_mask & F2FS_SPEC_checkpoint_disable_cap)
		F2FS_OPTION(sbi).unusable_cap = F2FS_CTX_INFO(ctx).unusable_cap;
	if (ctx->spec_mask & F2FS_SPEC_checkpoint_disable_cap_perc)
		F2FS_OPTION(sbi).unusable_cap_perc =
					F2FS_CTX_INFO(ctx).unusable_cap_perc;
	if (ctx->spec_mask & F2FS_SPEC_discard_unit)
		F2FS_OPTION(sbi).discard_unit = F2FS_CTX_INFO(ctx).discard_unit;
	if (ctx->spec_mask & F2FS_SPEC_memory_mode)
		F2FS_OPTION(sbi).memory_mode = F2FS_CTX_INFO(ctx).memory_mode;
	if (ctx->spec_mask & F2FS_SPEC_errors)
		F2FS_OPTION(sbi).errors = F2FS_CTX_INFO(ctx).errors;
	if (ctx->spec_mask & F2FS_SPEC_lookup_mode)
		F2FS_OPTION(sbi).lookup_mode = F2FS_CTX_INFO(ctx).lookup_mode;

	f2fs_apply_compression(fc, sb);
	f2fs_apply_test_dummy_encryption(fc, sb);
	f2fs_apply_quota_options(fc, sb);
}

static int f2fs_sanity_check_options(struct f2fs_sb_info *sbi, bool remount)
{
	if (f2fs_sb_has_device_alias(sbi) &&
	    !test_opt(sbi, READ_EXTENT_CACHE)) {
		f2fs_err(sbi, "device aliasing requires extent cache");
		return -EINVAL;
	}

	if (!remount)
		return 0;

#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi) &&
	    sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) {
		f2fs_err(sbi,
			"zoned: max open zones %u is too small, need at least %u open zones",
			sbi->max_open_zones, F2FS_OPTION(sbi).active_logs);
		return -EINVAL;
	}
#endif
	if (f2fs_lfs_mode(sbi) && !IS_F2FS_IPU_DISABLE(sbi)) {
		f2fs_warn(sbi, "LFS is not compatible with IPU");
		return -EINVAL;
	}
	return 0;
}

static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	if (time_to_inject(F2FS_SB(sb), FAULT_SLAB_ALLOC))
		return NULL;

	fi = alloc_inode_sb(sb, f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	atomic_set(&fi->dirty_pages, 0);
	atomic_set(&fi->i_compr_blocks, 0);
	atomic_set(&fi->open_count, 0);
	atomic_set(&fi->writeback, 0);
	init_f2fs_rwsem(&fi->i_sem);
	spin_lock_init(&fi->i_size_lock);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->gdonate_list);
	init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
	init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
	init_f2fs_rwsem(&fi->i_xattr_sem);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}
useless meta/node dirty pages. 1839 */ 1840 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 1841 if (inode->i_ino == F2FS_NODE_INO(sbi) || 1842 inode->i_ino == F2FS_META_INO(sbi)) { 1843 trace_f2fs_drop_inode(inode, 1); 1844 return 1; 1845 } 1846 } 1847 1848 /* 1849 * This is to avoid a deadlock condition like below. 1850 * writeback_single_inode(inode) 1851 * - f2fs_write_data_page 1852 * - f2fs_gc -> iput -> evict 1853 * - inode_wait_for_writeback(inode) 1854 */ 1855 if ((!inode_unhashed(inode) && inode_state_read(inode) & I_SYNC)) { 1856 if (!inode->i_nlink && !is_bad_inode(inode)) { 1857 /* to avoid evict_inode call simultaneously */ 1858 __iget(inode); 1859 spin_unlock(&inode->i_lock); 1860 1861 /* should remain fi->extent_tree for writepage */ 1862 f2fs_destroy_extent_node(inode); 1863 1864 sb_start_intwrite(inode->i_sb); 1865 f2fs_i_size_write(inode, 0); 1866 1867 f2fs_submit_merged_write_cond(F2FS_I_SB(inode), 1868 inode, NULL, 0, DATA); 1869 truncate_inode_pages_final(inode->i_mapping); 1870 1871 if (F2FS_HAS_BLOCKS(inode)) 1872 f2fs_truncate(inode); 1873 1874 sb_end_intwrite(inode->i_sb); 1875 1876 spin_lock(&inode->i_lock); 1877 atomic_dec(&inode->i_count); 1878 } 1879 trace_f2fs_drop_inode(inode, 0); 1880 return 0; 1881 } 1882 ret = inode_generic_drop(inode); 1883 if (!ret) 1884 ret = fscrypt_drop_inode(inode); 1885 trace_f2fs_drop_inode(inode, ret); 1886 return ret; 1887 } 1888 1889 int f2fs_inode_dirtied(struct inode *inode, bool sync) 1890 { 1891 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1892 int ret = 0; 1893 1894 spin_lock(&sbi->inode_lock[DIRTY_META]); 1895 if (is_inode_flag_set(inode, FI_DIRTY_INODE)) { 1896 ret = 1; 1897 } else { 1898 set_inode_flag(inode, FI_DIRTY_INODE); 1899 stat_inc_dirty_inode(sbi, DIRTY_META); 1900 } 1901 if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) { 1902 list_add_tail(&F2FS_I(inode)->gdirty_list, 1903 &sbi->inode_list[DIRTY_META]); 1904 inc_page_count(sbi, F2FS_DIRTY_IMETA); 1905 } 1906 spin_unlock(&sbi->inode_lock[DIRTY_META]); 1907 1908 /* if atomic write is not committed, set inode w/ atomic dirty */ 1909 if (!ret && f2fs_is_atomic_file(inode) && 1910 !is_inode_flag_set(inode, FI_ATOMIC_COMMITTED)) 1911 set_inode_flag(inode, FI_ATOMIC_DIRTIED); 1912 1913 return ret; 1914 } 1915 1916 void f2fs_inode_synced(struct inode *inode) 1917 { 1918 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1919 1920 spin_lock(&sbi->inode_lock[DIRTY_META]); 1921 if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) { 1922 spin_unlock(&sbi->inode_lock[DIRTY_META]); 1923 return; 1924 } 1925 if (!list_empty(&F2FS_I(inode)->gdirty_list)) { 1926 list_del_init(&F2FS_I(inode)->gdirty_list); 1927 dec_page_count(sbi, F2FS_DIRTY_IMETA); 1928 } 1929 clear_inode_flag(inode, FI_DIRTY_INODE); 1930 clear_inode_flag(inode, FI_AUTO_RECOVER); 1931 stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META); 1932 spin_unlock(&sbi->inode_lock[DIRTY_META]); 1933 } 1934 1935 /* 1936 * f2fs_dirty_inode() is called from __mark_inode_dirty() 1937 * 1938 * We should call set_dirty_inode to write the dirty inode through write_inode. 
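 * Node and meta inodes are skipped below; their dirty pages are tracked by dedicated per-type page counters rather than the FI_DIRTY_INODE flag.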
1939 */ 1940 static void f2fs_dirty_inode(struct inode *inode, int flags) 1941 { 1942 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1943 1944 if (inode->i_ino == F2FS_NODE_INO(sbi) || 1945 inode->i_ino == F2FS_META_INO(sbi)) 1946 return; 1947 1948 if (is_inode_flag_set(inode, FI_AUTO_RECOVER)) 1949 clear_inode_flag(inode, FI_AUTO_RECOVER); 1950 1951 f2fs_inode_dirtied(inode, false); 1952 } 1953 1954 static void f2fs_free_inode(struct inode *inode) 1955 { 1956 fscrypt_free_inode(inode); 1957 kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode)); 1958 } 1959 1960 static void destroy_percpu_info(struct f2fs_sb_info *sbi) 1961 { 1962 percpu_counter_destroy(&sbi->total_valid_inode_count); 1963 percpu_counter_destroy(&sbi->rf_node_block_count); 1964 percpu_counter_destroy(&sbi->alloc_valid_block_count); 1965 } 1966 1967 static void destroy_device_list(struct f2fs_sb_info *sbi) 1968 { 1969 int i; 1970 1971 for (i = 0; i < sbi->s_ndevs; i++) { 1972 if (i > 0) 1973 bdev_fput(FDEV(i).bdev_file); 1974 #ifdef CONFIG_BLK_DEV_ZONED 1975 kvfree(FDEV(i).blkz_seq); 1976 #endif 1977 } 1978 kvfree(sbi->devs); 1979 } 1980 1981 static void f2fs_put_super(struct super_block *sb) 1982 { 1983 struct f2fs_sb_info *sbi = F2FS_SB(sb); 1984 int i; 1985 int err = 0; 1986 bool done; 1987 1988 /* unregister procfs/sysfs entries in advance to avoid race case */ 1989 f2fs_unregister_sysfs(sbi); 1990 1991 f2fs_quota_off_umount(sb); 1992 1993 /* prevent remaining shrinker jobs */ 1994 mutex_lock(&sbi->umount_mutex); 1995 1996 /* 1997 * flush all issued checkpoints and stop the checkpoint issue thread. 1998 * after that, all checkpoints should be done by each process context. 1999 */ 2000 f2fs_stop_ckpt_thread(sbi); 2001 2002 /* 2003 * We don't need to do a checkpoint when the superblock is clean. 2004 * But if the previous checkpoint was not done by umount, we need to do 2005 * a clean checkpoint again. 2006 */ 2007 if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) || 2008 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) { 2009 struct cp_control cpc = { 2010 .reason = CP_UMOUNT, 2011 }; 2012 stat_inc_cp_call_count(sbi, TOTAL_CALL); 2013 err = f2fs_write_checkpoint(sbi, &cpc); 2014 } 2015 2016 /* be sure to wait for any on-going discard commands */ 2017 done = f2fs_issue_discard_timeout(sbi, true); 2018 if (f2fs_realtime_discard_enable(sbi) && !sbi->discard_blks && done) { 2019 struct cp_control cpc = { 2020 .reason = CP_UMOUNT | CP_TRIMMED, 2021 }; 2022 stat_inc_cp_call_count(sbi, TOTAL_CALL); 2023 err = f2fs_write_checkpoint(sbi, &cpc); 2024 } 2025 2026 /* 2027 * Normally the superblock is clean, so we need to release this. 2028 * In addition, EIO skips the checkpoint, so we need this as well.
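 * f2fs_release_ino_entry() with 'all' set drops the cached ino entries of all types.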
2029 */ 2030 f2fs_release_ino_entry(sbi, true); 2031 2032 f2fs_leave_shrinker(sbi); 2033 mutex_unlock(&sbi->umount_mutex); 2034 2035 /* our cp_error case, we can wait for any writeback page */ 2036 f2fs_flush_merged_writes(sbi); 2037 2038 f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA); 2039 2040 if (err || f2fs_cp_error(sbi)) { 2041 truncate_inode_pages_final(NODE_MAPPING(sbi)); 2042 truncate_inode_pages_final(META_MAPPING(sbi)); 2043 } 2044 2045 f2fs_bug_on(sbi, sbi->fsync_node_num); 2046 2047 f2fs_destroy_compress_inode(sbi); 2048 2049 iput(sbi->node_inode); 2050 sbi->node_inode = NULL; 2051 2052 iput(sbi->meta_inode); 2053 sbi->meta_inode = NULL; 2054 2055 /* Should check the page counts after dropping all node/meta pages */ 2056 for (i = 0; i < NR_COUNT_TYPE; i++) { 2057 if (!get_pages(sbi, i)) 2058 continue; 2059 f2fs_err(sbi, "detect filesystem reference count leak during " 2060 "umount, type: %d, count: %lld", i, get_pages(sbi, i)); 2061 f2fs_bug_on(sbi, 1); 2062 } 2063 2064 /* 2065 * iput() can update stat information, if f2fs_write_checkpoint() 2066 * above failed with error. 2067 */ 2068 f2fs_destroy_stats(sbi); 2069 2070 /* destroy f2fs internal modules */ 2071 f2fs_destroy_node_manager(sbi); 2072 f2fs_destroy_segment_manager(sbi); 2073 2074 /* flush s_error_work before sbi destroy */ 2075 flush_work(&sbi->s_error_work); 2076 2077 f2fs_destroy_post_read_wq(sbi); 2078 2079 kvfree(sbi->ckpt); 2080 2081 kfree(sbi->raw_super); 2082 2083 f2fs_destroy_page_array_cache(sbi); 2084 #ifdef CONFIG_QUOTA 2085 for (i = 0; i < MAXQUOTAS; i++) 2086 kfree(F2FS_OPTION(sbi).s_qf_names[i]); 2087 #endif 2088 fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy); 2089 destroy_percpu_info(sbi); 2090 f2fs_destroy_iostat(sbi); 2091 for (i = 0; i < NR_PAGE_TYPE; i++) 2092 kfree(sbi->write_io[i]); 2093 #if IS_ENABLED(CONFIG_UNICODE) 2094 utf8_unload(sb->s_encoding); 2095 #endif 2096 sync_blockdev(sb->s_bdev); 2097 invalidate_bdev(sb->s_bdev); 2098 for (i = 1; i < sbi->s_ndevs; i++) { 2099 sync_blockdev(FDEV(i).bdev); 2100 invalidate_bdev(FDEV(i).bdev); 2101 } 2102 } 2103 2104 int f2fs_sync_fs(struct super_block *sb, int sync) 2105 { 2106 struct f2fs_sb_info *sbi = F2FS_SB(sb); 2107 int err = 0; 2108 2109 if (unlikely(f2fs_cp_error(sbi))) 2110 return 0; 2111 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 2112 return 0; 2113 2114 trace_f2fs_sync_fs(sb, sync); 2115 2116 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) 2117 return -EAGAIN; 2118 2119 if (sync) { 2120 stat_inc_cp_call_count(sbi, TOTAL_CALL); 2121 err = f2fs_issue_checkpoint(sbi); 2122 } 2123 2124 return err; 2125 } 2126 2127 static int f2fs_freeze(struct super_block *sb) 2128 { 2129 struct f2fs_sb_info *sbi = F2FS_SB(sb); 2130 2131 if (f2fs_readonly(sb)) 2132 return 0; 2133 2134 /* IO error happened before */ 2135 if (unlikely(f2fs_cp_error(sbi))) 2136 return -EIO; 2137 2138 /* must be clean, since sync_filesystem() was already called */ 2139 if (is_sbi_flag_set(sbi, SBI_IS_DIRTY)) 2140 return -EINVAL; 2141 2142 sbi->umount_lock_holder = current; 2143 2144 /* Let's flush checkpoints and stop the thread. 
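 * so that no checkpoint request is left pending once the filesystem is frozen.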
*/ 2145 f2fs_flush_ckpt_thread(sbi); 2146 2147 sbi->umount_lock_holder = NULL; 2148 2149 /* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */ 2150 set_sbi_flag(sbi, SBI_IS_FREEZING); 2151 return 0; 2152 } 2153 2154 static int f2fs_unfreeze(struct super_block *sb) 2155 { 2156 struct f2fs_sb_info *sbi = F2FS_SB(sb); 2157 2158 /* 2159 * Creating a snapshot on a mounted lvm device updates its 2160 * discard_max_bytes to zero, so let's drop all 2161 * remaining discards. 2162 * We don't need to disable real-time discard because discard_max_bytes 2163 * will recover after removal of the snapshot. 2164 */ 2165 if (test_opt(sbi, DISCARD) && !f2fs_hw_support_discard(sbi)) 2166 f2fs_issue_discard_timeout(sbi, true); 2167 2168 clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING); 2169 return 0; 2170 } 2171 2172 #ifdef CONFIG_QUOTA 2173 static int f2fs_statfs_project(struct super_block *sb, 2174 kprojid_t projid, struct kstatfs *buf) 2175 { 2176 struct kqid qid; 2177 struct dquot *dquot; 2178 u64 limit; 2179 u64 curblock; 2180 2181 qid = make_kqid_projid(projid); 2182 dquot = dqget(sb, qid); 2183 if (IS_ERR(dquot)) 2184 return PTR_ERR(dquot); 2185 spin_lock(&dquot->dq_dqb_lock); 2186 2187 limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit, 2188 dquot->dq_dqb.dqb_bhardlimit); 2189 limit >>= sb->s_blocksize_bits; 2190 2191 if (limit) { 2192 uint64_t remaining = 0; 2193 2194 curblock = (dquot->dq_dqb.dqb_curspace + 2195 dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; 2196 if (limit > curblock) 2197 remaining = limit - curblock; 2198 2199 buf->f_blocks = min(buf->f_blocks, limit); 2200 buf->f_bfree = min(buf->f_bfree, remaining); 2201 buf->f_bavail = min(buf->f_bavail, remaining); 2202 } 2203 2204 limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit, 2205 dquot->dq_dqb.dqb_ihardlimit); 2206 2207 if (limit) { 2208 uint64_t remaining = 0; 2209 2210 if (limit > dquot->dq_dqb.dqb_curinodes) 2211 remaining = limit - dquot->dq_dqb.dqb_curinodes; 2212 2213 buf->f_files = min(buf->f_files, limit); 2214 buf->f_ffree = min(buf->f_ffree, remaining); 2215 } 2216 2217 spin_unlock(&dquot->dq_dqb_lock); 2218 dqput(dquot); 2219 return 0; 2220 } 2221 #endif 2222 2223 static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf) 2224 { 2225 struct super_block *sb = dentry->d_sb; 2226 struct f2fs_sb_info *sbi = F2FS_SB(sb); 2227 u64 id = huge_encode_dev(sb->s_bdev->bd_dev); 2228 block_t total_count, user_block_count, start_count; 2229 u64 avail_node_count; 2230 unsigned int total_valid_node_count; 2231 2232 total_count = le64_to_cpu(sbi->raw_super->block_count); 2233 start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr); 2234 buf->f_type = F2FS_SUPER_MAGIC; 2235 buf->f_bsize = sbi->blocksize; 2236 2237 buf->f_blocks = total_count - start_count; 2238 2239 spin_lock(&sbi->stat_lock); 2240 if (sbi->carve_out) 2241 buf->f_blocks -= sbi->current_reserved_blocks; 2242 user_block_count = sbi->user_block_count; 2243 total_valid_node_count = valid_node_count(sbi); 2244 avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; 2245 buf->f_bfree = user_block_count - valid_user_blocks(sbi) - 2246 sbi->current_reserved_blocks; 2247 2248 if (unlikely(buf->f_bfree <= sbi->unusable_block_count)) 2249 buf->f_bfree = 0; 2250 else 2251 buf->f_bfree -= sbi->unusable_block_count; 2252 spin_unlock(&sbi->stat_lock); 2253 2254 if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks) 2255 buf->f_bavail = buf->f_bfree - 2256 F2FS_OPTION(sbi).root_reserved_blocks; 2257 else 2258 buf->f_bavail = 0; 2259 2260 if
(avail_node_count > user_block_count) { 2261 buf->f_files = user_block_count; 2262 buf->f_ffree = buf->f_bavail; 2263 } else { 2264 buf->f_files = avail_node_count; 2265 buf->f_ffree = min(avail_node_count - total_valid_node_count, 2266 buf->f_bavail); 2267 } 2268 2269 buf->f_namelen = F2FS_NAME_LEN; 2270 buf->f_fsid = u64_to_fsid(id); 2271 2272 #ifdef CONFIG_QUOTA 2273 if (is_inode_flag_set(d_inode(dentry), FI_PROJ_INHERIT) && 2274 sb_has_quota_limits_enabled(sb, PRJQUOTA)) { 2275 f2fs_statfs_project(sb, F2FS_I(d_inode(dentry))->i_projid, buf); 2276 } 2277 #endif 2278 return 0; 2279 } 2280 2281 static inline void f2fs_show_quota_options(struct seq_file *seq, 2282 struct super_block *sb) 2283 { 2284 #ifdef CONFIG_QUOTA 2285 struct f2fs_sb_info *sbi = F2FS_SB(sb); 2286 2287 if (F2FS_OPTION(sbi).s_jquota_fmt) { 2288 char *fmtname = ""; 2289 2290 switch (F2FS_OPTION(sbi).s_jquota_fmt) { 2291 case QFMT_VFS_OLD: 2292 fmtname = "vfsold"; 2293 break; 2294 case QFMT_VFS_V0: 2295 fmtname = "vfsv0"; 2296 break; 2297 case QFMT_VFS_V1: 2298 fmtname = "vfsv1"; 2299 break; 2300 } 2301 seq_printf(seq, ",jqfmt=%s", fmtname); 2302 } 2303 2304 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA]) 2305 seq_show_option(seq, "usrjquota", 2306 F2FS_OPTION(sbi).s_qf_names[USRQUOTA]); 2307 2308 if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]) 2309 seq_show_option(seq, "grpjquota", 2310 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]); 2311 2312 if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) 2313 seq_show_option(seq, "prjjquota", 2314 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]); 2315 #endif 2316 } 2317 2318 #ifdef CONFIG_F2FS_FS_COMPRESSION 2319 static inline void f2fs_show_compress_options(struct seq_file *seq, 2320 struct super_block *sb) 2321 { 2322 struct f2fs_sb_info *sbi = F2FS_SB(sb); 2323 char *algtype = ""; 2324 int i; 2325 2326 if (!f2fs_sb_has_compression(sbi)) 2327 return; 2328 2329 switch (F2FS_OPTION(sbi).compress_algorithm) { 2330 case COMPRESS_LZO: 2331 algtype = "lzo"; 2332 break; 2333 case COMPRESS_LZ4: 2334 algtype = "lz4"; 2335 break; 2336 case COMPRESS_ZSTD: 2337 algtype = "zstd"; 2338 break; 2339 case COMPRESS_LZORLE: 2340 algtype = "lzo-rle"; 2341 break; 2342 } 2343 seq_printf(seq, ",compress_algorithm=%s", algtype); 2344 2345 if (F2FS_OPTION(sbi).compress_level) 2346 seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level); 2347 2348 seq_printf(seq, ",compress_log_size=%u", 2349 F2FS_OPTION(sbi).compress_log_size); 2350 2351 for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) { 2352 seq_printf(seq, ",compress_extension=%s", 2353 F2FS_OPTION(sbi).extensions[i]); 2354 } 2355 2356 for (i = 0; i < F2FS_OPTION(sbi).nocompress_ext_cnt; i++) { 2357 seq_printf(seq, ",nocompress_extension=%s", 2358 F2FS_OPTION(sbi).noextensions[i]); 2359 } 2360 2361 if (F2FS_OPTION(sbi).compress_chksum) 2362 seq_puts(seq, ",compress_chksum"); 2363 2364 if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_FS) 2365 seq_printf(seq, ",compress_mode=%s", "fs"); 2366 else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER) 2367 seq_printf(seq, ",compress_mode=%s", "user"); 2368 2369 if (test_opt(sbi, COMPRESS_CACHE)) 2370 seq_puts(seq, ",compress_cache"); 2371 } 2372 #endif 2373 2374 static int f2fs_show_options(struct seq_file *seq, struct dentry *root) 2375 { 2376 struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb); 2377 2378 if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) 2379 seq_printf(seq, ",background_gc=%s", "sync"); 2380 else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON) 2381 seq_printf(seq, ",background_gc=%s", "on"); 2382 else if 
(F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF) 2383 seq_printf(seq, ",background_gc=%s", "off"); 2384 2385 if (test_opt(sbi, GC_MERGE)) 2386 seq_puts(seq, ",gc_merge"); 2387 else 2388 seq_puts(seq, ",nogc_merge"); 2389 2390 if (test_opt(sbi, DISABLE_ROLL_FORWARD)) 2391 seq_puts(seq, ",disable_roll_forward"); 2392 if (test_opt(sbi, NORECOVERY)) 2393 seq_puts(seq, ",norecovery"); 2394 if (test_opt(sbi, DISCARD)) { 2395 seq_puts(seq, ",discard"); 2396 if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK) 2397 seq_printf(seq, ",discard_unit=%s", "block"); 2398 else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT) 2399 seq_printf(seq, ",discard_unit=%s", "segment"); 2400 else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION) 2401 seq_printf(seq, ",discard_unit=%s", "section"); 2402 } else { 2403 seq_puts(seq, ",nodiscard"); 2404 } 2405 #ifdef CONFIG_F2FS_FS_XATTR 2406 if (test_opt(sbi, XATTR_USER)) 2407 seq_puts(seq, ",user_xattr"); 2408 else 2409 seq_puts(seq, ",nouser_xattr"); 2410 if (test_opt(sbi, INLINE_XATTR)) 2411 seq_puts(seq, ",inline_xattr"); 2412 else 2413 seq_puts(seq, ",noinline_xattr"); 2414 if (test_opt(sbi, INLINE_XATTR_SIZE)) 2415 seq_printf(seq, ",inline_xattr_size=%u", 2416 F2FS_OPTION(sbi).inline_xattr_size); 2417 #endif 2418 #ifdef CONFIG_F2FS_FS_POSIX_ACL 2419 if (test_opt(sbi, POSIX_ACL)) 2420 seq_puts(seq, ",acl"); 2421 else 2422 seq_puts(seq, ",noacl"); 2423 #endif 2424 if (test_opt(sbi, DISABLE_EXT_IDENTIFY)) 2425 seq_puts(seq, ",disable_ext_identify"); 2426 if (test_opt(sbi, INLINE_DATA)) 2427 seq_puts(seq, ",inline_data"); 2428 else 2429 seq_puts(seq, ",noinline_data"); 2430 if (test_opt(sbi, INLINE_DENTRY)) 2431 seq_puts(seq, ",inline_dentry"); 2432 else 2433 seq_puts(seq, ",noinline_dentry"); 2434 if (test_opt(sbi, FLUSH_MERGE)) 2435 seq_puts(seq, ",flush_merge"); 2436 else 2437 seq_puts(seq, ",noflush_merge"); 2438 if (test_opt(sbi, NOBARRIER)) 2439 seq_puts(seq, ",nobarrier"); 2440 else 2441 seq_puts(seq, ",barrier"); 2442 if (test_opt(sbi, FASTBOOT)) 2443 seq_puts(seq, ",fastboot"); 2444 if (test_opt(sbi, READ_EXTENT_CACHE)) 2445 seq_puts(seq, ",extent_cache"); 2446 else 2447 seq_puts(seq, ",noextent_cache"); 2448 if (test_opt(sbi, AGE_EXTENT_CACHE)) 2449 seq_puts(seq, ",age_extent_cache"); 2450 if (test_opt(sbi, DATA_FLUSH)) 2451 seq_puts(seq, ",data_flush"); 2452 2453 seq_puts(seq, ",mode="); 2454 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE) 2455 seq_puts(seq, "adaptive"); 2456 else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS) 2457 seq_puts(seq, "lfs"); 2458 else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG) 2459 seq_puts(seq, "fragment:segment"); 2460 else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) 2461 seq_puts(seq, "fragment:block"); 2462 seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs); 2463 if (test_opt(sbi, RESERVE_ROOT) || test_opt(sbi, RESERVE_NODE)) 2464 seq_printf(seq, ",reserve_root=%u,reserve_node=%u,resuid=%u," 2465 "resgid=%u", 2466 F2FS_OPTION(sbi).root_reserved_blocks, 2467 F2FS_OPTION(sbi).root_reserved_nodes, 2468 from_kuid_munged(&init_user_ns, 2469 F2FS_OPTION(sbi).s_resuid), 2470 from_kgid_munged(&init_user_ns, 2471 F2FS_OPTION(sbi).s_resgid)); 2472 #ifdef CONFIG_F2FS_FAULT_INJECTION 2473 if (test_opt(sbi, FAULT_INJECTION)) { 2474 seq_printf(seq, ",fault_injection=%u", 2475 F2FS_OPTION(sbi).fault_info.inject_rate); 2476 seq_printf(seq, ",fault_type=%u", 2477 F2FS_OPTION(sbi).fault_info.inject_type); 2478 } 2479 #endif 2480 #ifdef CONFIG_QUOTA 2481 if (test_opt(sbi, QUOTA)) 2482 
seq_puts(seq, ",quota"); 2483 if (test_opt(sbi, USRQUOTA)) 2484 seq_puts(seq, ",usrquota"); 2485 if (test_opt(sbi, GRPQUOTA)) 2486 seq_puts(seq, ",grpquota"); 2487 if (test_opt(sbi, PRJQUOTA)) 2488 seq_puts(seq, ",prjquota"); 2489 #endif 2490 f2fs_show_quota_options(seq, sbi->sb); 2491 2492 fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb); 2493 2494 if (sbi->sb->s_flags & SB_INLINECRYPT) 2495 seq_puts(seq, ",inlinecrypt"); 2496 2497 if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT) 2498 seq_printf(seq, ",alloc_mode=%s", "default"); 2499 else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE) 2500 seq_printf(seq, ",alloc_mode=%s", "reuse"); 2501 2502 if (test_opt(sbi, DISABLE_CHECKPOINT)) 2503 seq_printf(seq, ",checkpoint=disable:%u", 2504 F2FS_OPTION(sbi).unusable_cap); 2505 if (test_opt(sbi, MERGE_CHECKPOINT)) 2506 seq_puts(seq, ",checkpoint_merge"); 2507 else 2508 seq_puts(seq, ",nocheckpoint_merge"); 2509 if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX) 2510 seq_printf(seq, ",fsync_mode=%s", "posix"); 2511 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) 2512 seq_printf(seq, ",fsync_mode=%s", "strict"); 2513 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER) 2514 seq_printf(seq, ",fsync_mode=%s", "nobarrier"); 2515 2516 #ifdef CONFIG_F2FS_FS_COMPRESSION 2517 f2fs_show_compress_options(seq, sbi->sb); 2518 #endif 2519 2520 if (test_opt(sbi, ATGC)) 2521 seq_puts(seq, ",atgc"); 2522 2523 if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_NORMAL) 2524 seq_printf(seq, ",memory=%s", "normal"); 2525 else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW) 2526 seq_printf(seq, ",memory=%s", "low"); 2527 2528 if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY) 2529 seq_printf(seq, ",errors=%s", "remount-ro"); 2530 else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE) 2531 seq_printf(seq, ",errors=%s", "continue"); 2532 else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC) 2533 seq_printf(seq, ",errors=%s", "panic"); 2534 2535 if (test_opt(sbi, NAT_BITS)) 2536 seq_puts(seq, ",nat_bits"); 2537 2538 if (F2FS_OPTION(sbi).lookup_mode == LOOKUP_PERF) 2539 seq_show_option(seq, "lookup_mode", "perf"); 2540 else if (F2FS_OPTION(sbi).lookup_mode == LOOKUP_COMPAT) 2541 seq_show_option(seq, "lookup_mode", "compat"); 2542 else if (F2FS_OPTION(sbi).lookup_mode == LOOKUP_AUTO) 2543 seq_show_option(seq, "lookup_mode", "auto"); 2544 2545 return 0; 2546 } 2547 2548 static void default_options(struct f2fs_sb_info *sbi, bool remount) 2549 { 2550 /* init some FS parameters */ 2551 if (!remount) { 2552 set_opt(sbi, READ_EXTENT_CACHE); 2553 clear_opt(sbi, DISABLE_CHECKPOINT); 2554 2555 if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) 2556 set_opt(sbi, DISCARD); 2557 2558 if (f2fs_sb_has_blkzoned(sbi)) 2559 F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION; 2560 else 2561 F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK; 2562 } 2563 2564 if (f2fs_sb_has_readonly(sbi)) 2565 F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE; 2566 else 2567 F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE; 2568 2569 F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS; 2570 if (le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_main) <= 2571 SMALL_VOLUME_SEGMENTS) 2572 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE; 2573 else 2574 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; 2575 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX; 2576 F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID); 2577 F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, 
F2FS_DEF_RESGID); 2578 if (f2fs_sb_has_compression(sbi)) { 2579 F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4; 2580 F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE; 2581 F2FS_OPTION(sbi).compress_ext_cnt = 0; 2582 F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS; 2583 } 2584 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON; 2585 F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL; 2586 F2FS_OPTION(sbi).errors = MOUNT_ERRORS_CONTINUE; 2587 2588 set_opt(sbi, INLINE_XATTR); 2589 set_opt(sbi, INLINE_DATA); 2590 set_opt(sbi, INLINE_DENTRY); 2591 set_opt(sbi, MERGE_CHECKPOINT); 2592 set_opt(sbi, LAZYTIME); 2593 F2FS_OPTION(sbi).unusable_cap = 0; 2594 if (!f2fs_is_readonly(sbi)) 2595 set_opt(sbi, FLUSH_MERGE); 2596 if (f2fs_sb_has_blkzoned(sbi)) 2597 F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS; 2598 else 2599 F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE; 2600 2601 #ifdef CONFIG_F2FS_FS_XATTR 2602 set_opt(sbi, XATTR_USER); 2603 #endif 2604 #ifdef CONFIG_F2FS_FS_POSIX_ACL 2605 set_opt(sbi, POSIX_ACL); 2606 #endif 2607 2608 f2fs_build_fault_attr(sbi, 0, 0, FAULT_ALL); 2609 2610 F2FS_OPTION(sbi).lookup_mode = LOOKUP_PERF; 2611 } 2612 2613 #ifdef CONFIG_QUOTA 2614 static int f2fs_enable_quotas(struct super_block *sb); 2615 #endif 2616 2617 static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi) 2618 { 2619 unsigned int s_flags = sbi->sb->s_flags; 2620 struct cp_control cpc; 2621 struct f2fs_lock_context lc; 2622 unsigned int gc_mode = sbi->gc_mode; 2623 int err = 0; 2624 int ret; 2625 block_t unusable; 2626 2627 if (s_flags & SB_RDONLY) { 2628 f2fs_err(sbi, "checkpoint=disable on readonly fs"); 2629 return -EINVAL; 2630 } 2631 sbi->sb->s_flags |= SB_ACTIVE; 2632 2633 /* check if we need more GC first */ 2634 unusable = f2fs_get_unusable_blocks(sbi); 2635 if (!f2fs_disable_cp_again(sbi, unusable)) 2636 goto skip_gc; 2637 2638 f2fs_update_time(sbi, DISABLE_TIME); 2639 2640 sbi->gc_mode = GC_URGENT_HIGH; 2641 2642 while (!f2fs_time_over(sbi, DISABLE_TIME)) { 2643 struct f2fs_gc_control gc_control = { 2644 .victim_segno = NULL_SEGNO, 2645 .init_gc_type = FG_GC, 2646 .should_migrate_blocks = false, 2647 .err_gc_skipped = true, 2648 .no_bg_gc = true, 2649 .nr_free_secs = 1 }; 2650 2651 f2fs_down_write_trace(&sbi->gc_lock, &gc_control.lc); 2652 stat_inc_gc_call_count(sbi, FOREGROUND); 2653 err = f2fs_gc(sbi, &gc_control); 2654 if (err == -ENODATA) { 2655 err = 0; 2656 break; 2657 } 2658 if (err && err != -EAGAIN) 2659 break; 2660 } 2661 2662 ret = sync_filesystem(sbi->sb); 2663 if (ret || err) { 2664 err = ret ? 
ret : err; 2665 goto restore_flag; 2666 } 2667 2668 unusable = f2fs_get_unusable_blocks(sbi); 2669 if (f2fs_disable_cp_again(sbi, unusable)) { 2670 err = -EAGAIN; 2671 goto restore_flag; 2672 } 2673 2674 skip_gc: 2675 f2fs_down_write_trace(&sbi->gc_lock, &lc); 2676 cpc.reason = CP_PAUSE; 2677 set_sbi_flag(sbi, SBI_CP_DISABLED); 2678 stat_inc_cp_call_count(sbi, TOTAL_CALL); 2679 err = f2fs_write_checkpoint(sbi, &cpc); 2680 if (err) 2681 goto out_unlock; 2682 2683 spin_lock(&sbi->stat_lock); 2684 sbi->unusable_block_count = unusable; 2685 spin_unlock(&sbi->stat_lock); 2686 2687 out_unlock: 2688 f2fs_up_write_trace(&sbi->gc_lock, &lc); 2689 restore_flag: 2690 sbi->gc_mode = gc_mode; 2691 sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */ 2692 f2fs_info(sbi, "f2fs_disable_checkpoint() finish, err:%d", err); 2693 return err; 2694 } 2695 2696 static int f2fs_enable_checkpoint(struct f2fs_sb_info *sbi) 2697 { 2698 int retry = MAX_FLUSH_RETRY_COUNT; 2699 long long start, writeback, end; 2700 int ret; 2701 struct f2fs_lock_context lc; 2702 long long skipped_write, dirty_data; 2703 2704 f2fs_info(sbi, "f2fs_enable_checkpoint() starts, meta: %lld, node: %lld, data: %lld", 2705 get_pages(sbi, F2FS_DIRTY_META), 2706 get_pages(sbi, F2FS_DIRTY_NODES), 2707 get_pages(sbi, F2FS_DIRTY_DATA)); 2708 2709 start = ktime_get(); 2710 2711 set_sbi_flag(sbi, SBI_ENABLE_CHECKPOINT); 2712 2713 /* we should flush all the data to keep data consistency */ 2714 do { 2715 skipped_write = get_pages(sbi, F2FS_SKIPPED_WRITE); 2716 dirty_data = get_pages(sbi, F2FS_DIRTY_DATA); 2717 2718 sync_inodes_sb(sbi->sb); 2719 f2fs_io_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT); 2720 2721 f2fs_info(sbi, "sync_inodes_sb done, dirty_data: %lld, %lld, " 2722 "skipped write: %lld, %lld, retry: %d", 2723 get_pages(sbi, F2FS_DIRTY_DATA), 2724 dirty_data, 2725 get_pages(sbi, F2FS_SKIPPED_WRITE), 2726 skipped_write, retry); 2727 2728 /* 2729 * sync_inodes_sb() has retry logic, so let's check dirty_data 2730 * prior to skipped_write in case there is no dirty data.
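 * If neither counter changes across an iteration, another retry cannot make further progress, so stop the loop.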
2731 */ 2732 if (!get_pages(sbi, F2FS_DIRTY_DATA)) 2733 break; 2734 if (get_pages(sbi, F2FS_SKIPPED_WRITE) == skipped_write) 2735 break; 2736 } while (retry--); 2737 2738 clear_sbi_flag(sbi, SBI_ENABLE_CHECKPOINT); 2739 2740 writeback = ktime_get(); 2741 2742 if (unlikely(get_pages(sbi, F2FS_DIRTY_DATA) || 2743 get_pages(sbi, F2FS_SKIPPED_WRITE))) 2744 f2fs_warn(sbi, "checkpoint=enable unwritten data: %lld, skipped data: %lld, retry: %d", 2745 get_pages(sbi, F2FS_DIRTY_DATA), 2746 get_pages(sbi, F2FS_SKIPPED_WRITE), retry); 2747 2748 if (get_pages(sbi, F2FS_SKIPPED_WRITE)) 2749 atomic_set(&sbi->nr_pages[F2FS_SKIPPED_WRITE], 0); 2750 2751 f2fs_down_write_trace(&sbi->gc_lock, &lc); 2752 f2fs_dirty_to_prefree(sbi); 2753 2754 clear_sbi_flag(sbi, SBI_CP_DISABLED); 2755 set_sbi_flag(sbi, SBI_IS_DIRTY); 2756 f2fs_up_write_trace(&sbi->gc_lock, &lc); 2757 2758 ret = f2fs_sync_fs(sbi->sb, 1); 2759 if (ret) 2760 f2fs_err(sbi, "%s sync_fs failed, ret: %d", __func__, ret); 2761 2762 /* Let's ensure there's no pending checkpoint anymore */ 2763 f2fs_flush_ckpt_thread(sbi); 2764 2765 end = ktime_get(); 2766 2767 f2fs_info(sbi, "f2fs_enable_checkpoint() finishes, writeback:%llu, sync:%llu", 2768 ktime_ms_delta(writeback, start), 2769 ktime_ms_delta(end, writeback)); 2770 return ret; 2771 } 2772 2773 static int __f2fs_remount(struct fs_context *fc, struct super_block *sb) 2774 { 2775 struct f2fs_sb_info *sbi = F2FS_SB(sb); 2776 struct f2fs_mount_info org_mount_opt; 2777 unsigned long old_sb_flags; 2778 unsigned int flags = fc->sb_flags; 2779 int err; 2780 bool need_restart_gc = false, need_stop_gc = false; 2781 bool need_restart_flush = false, need_stop_flush = false; 2782 bool need_restart_discard = false, need_stop_discard = false; 2783 bool need_enable_checkpoint = false, need_disable_checkpoint = false; 2784 bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE); 2785 bool no_age_extent_cache = !test_opt(sbi, AGE_EXTENT_CACHE); 2786 bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT); 2787 bool no_atgc = !test_opt(sbi, ATGC); 2788 bool no_discard = !test_opt(sbi, DISCARD); 2789 bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE); 2790 bool block_unit_discard = f2fs_block_unit_discard(sbi); 2791 bool no_nat_bits = !test_opt(sbi, NAT_BITS); 2792 #ifdef CONFIG_QUOTA 2793 int i, j; 2794 #endif 2795 2796 /* 2797 * Save the old mount options in case we 2798 * need to restore them. 
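 * Every failure path below must restore org_mount_opt and old_sb_flags.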
2799 */ 2800 org_mount_opt = sbi->mount_opt; 2801 old_sb_flags = sb->s_flags; 2802 2803 sbi->umount_lock_holder = current; 2804 2805 #ifdef CONFIG_QUOTA 2806 org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt; 2807 for (i = 0; i < MAXQUOTAS; i++) { 2808 if (F2FS_OPTION(sbi).s_qf_names[i]) { 2809 org_mount_opt.s_qf_names[i] = 2810 kstrdup(F2FS_OPTION(sbi).s_qf_names[i], 2811 GFP_KERNEL); 2812 if (!org_mount_opt.s_qf_names[i]) { 2813 for (j = 0; j < i; j++) 2814 kfree(org_mount_opt.s_qf_names[j]); 2815 return -ENOMEM; 2816 } 2817 } else { 2818 org_mount_opt.s_qf_names[i] = NULL; 2819 } 2820 } 2821 #endif 2822 2823 /* recover superblocks we couldn't write due to previous RO mount */ 2824 if (!(flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) { 2825 err = f2fs_commit_super(sbi, false); 2826 f2fs_info(sbi, "Try to recover all the superblocks, ret: %d", 2827 err); 2828 if (!err) 2829 clear_sbi_flag(sbi, SBI_NEED_SB_WRITE); 2830 } 2831 2832 default_options(sbi, true); 2833 2834 err = f2fs_check_opt_consistency(fc, sb); 2835 if (err) 2836 goto restore_opts; 2837 2838 f2fs_apply_options(fc, sb); 2839 2840 err = f2fs_sanity_check_options(sbi, true); 2841 if (err) 2842 goto restore_opts; 2843 2844 /* flush outstanding errors before changing fs state */ 2845 flush_work(&sbi->s_error_work); 2846 2847 /* 2848 * Previous and new state of filesystem is RO, 2849 * so skip checking GC and FLUSH_MERGE conditions. 2850 */ 2851 if (f2fs_readonly(sb) && (flags & SB_RDONLY)) 2852 goto skip; 2853 2854 if (f2fs_dev_is_readonly(sbi) && !(flags & SB_RDONLY)) { 2855 err = -EROFS; 2856 goto restore_opts; 2857 } 2858 2859 #ifdef CONFIG_QUOTA 2860 if (!f2fs_readonly(sb) && (flags & SB_RDONLY)) { 2861 err = dquot_suspend(sb, -1); 2862 if (err < 0) 2863 goto restore_opts; 2864 } else if (f2fs_readonly(sb) && !(flags & SB_RDONLY)) { 2865 /* dquot_resume needs RW */ 2866 sb->s_flags &= ~SB_RDONLY; 2867 if (sb_any_quota_suspended(sb)) { 2868 dquot_resume(sb, -1); 2869 } else if (f2fs_sb_has_quota_ino(sbi)) { 2870 err = f2fs_enable_quotas(sb); 2871 if (err) 2872 goto restore_opts; 2873 } 2874 } 2875 #endif 2876 /* disallow enable atgc dynamically */ 2877 if (no_atgc == !!test_opt(sbi, ATGC)) { 2878 err = -EINVAL; 2879 f2fs_warn(sbi, "switch atgc option is not allowed"); 2880 goto restore_opts; 2881 } 2882 2883 /* disallow enable/disable extent_cache dynamically */ 2884 if (no_read_extent_cache == !!test_opt(sbi, READ_EXTENT_CACHE)) { 2885 err = -EINVAL; 2886 f2fs_warn(sbi, "switch extent_cache option is not allowed"); 2887 goto restore_opts; 2888 } 2889 /* disallow enable/disable age extent_cache dynamically */ 2890 if (no_age_extent_cache == !!test_opt(sbi, AGE_EXTENT_CACHE)) { 2891 err = -EINVAL; 2892 f2fs_warn(sbi, "switch age_extent_cache option is not allowed"); 2893 goto restore_opts; 2894 } 2895 2896 if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) { 2897 err = -EINVAL; 2898 f2fs_warn(sbi, "switch compress_cache option is not allowed"); 2899 goto restore_opts; 2900 } 2901 2902 if (block_unit_discard != f2fs_block_unit_discard(sbi)) { 2903 err = -EINVAL; 2904 f2fs_warn(sbi, "switch discard_unit option is not allowed"); 2905 goto restore_opts; 2906 } 2907 2908 if (no_nat_bits == !!test_opt(sbi, NAT_BITS)) { 2909 err = -EINVAL; 2910 f2fs_warn(sbi, "switch nat_bits option is not allowed"); 2911 goto restore_opts; 2912 } 2913 2914 if ((flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) { 2915 err = -EINVAL; 2916 f2fs_warn(sbi, "disabling checkpoint not compatible with read-only"); 2917 
goto restore_opts; 2918 } 2919 2920 /* 2921 * We stop the GC thread if FS is mounted as RO 2922 * or if background_gc = off is passed in mount 2923 * option. Also sync the filesystem. 2924 */ 2925 if ((flags & SB_RDONLY) || 2926 (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF && 2927 !test_opt(sbi, GC_MERGE))) { 2928 if (sbi->gc_thread) { 2929 f2fs_stop_gc_thread(sbi); 2930 need_restart_gc = true; 2931 } 2932 } else if (!sbi->gc_thread) { 2933 err = f2fs_start_gc_thread(sbi); 2934 if (err) 2935 goto restore_opts; 2936 need_stop_gc = true; 2937 } 2938 2939 if (flags & SB_RDONLY) { 2940 sync_inodes_sb(sb); 2941 2942 set_sbi_flag(sbi, SBI_IS_DIRTY); 2943 set_sbi_flag(sbi, SBI_IS_CLOSE); 2944 f2fs_sync_fs(sb, 1); 2945 clear_sbi_flag(sbi, SBI_IS_CLOSE); 2946 } 2947 2948 /* 2949 * We stop the issue-flush thread if FS is mounted as RO 2950 * or if flush_merge is not passed in mount option. 2951 */ 2952 if ((flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) { 2953 clear_opt(sbi, FLUSH_MERGE); 2954 f2fs_destroy_flush_cmd_control(sbi, false); 2955 need_restart_flush = true; 2956 } else { 2957 err = f2fs_create_flush_cmd_control(sbi); 2958 if (err) 2959 goto restore_gc; 2960 need_stop_flush = true; 2961 } 2962 2963 if (no_discard == !!test_opt(sbi, DISCARD)) { 2964 if (test_opt(sbi, DISCARD)) { 2965 err = f2fs_start_discard_thread(sbi); 2966 if (err) 2967 goto restore_flush; 2968 need_stop_discard = true; 2969 } else { 2970 f2fs_stop_discard_thread(sbi); 2971 /* 2972 * f2fs_ioc_fitrim() won't race w/ "remount ro" 2973 * so it's safe to check discard_cmd_cnt in 2974 * f2fs_issue_discard_timeout(). 2975 */ 2976 f2fs_issue_discard_timeout(sbi, flags & SB_RDONLY); 2977 need_restart_discard = true; 2978 } 2979 } 2980 2981 adjust_unusable_cap_perc(sbi); 2982 if (enable_checkpoint == !!test_opt(sbi, DISABLE_CHECKPOINT)) { 2983 if (test_opt(sbi, DISABLE_CHECKPOINT)) { 2984 err = f2fs_disable_checkpoint(sbi); 2985 if (err) 2986 goto restore_discard; 2987 need_enable_checkpoint = true; 2988 } else { 2989 err = f2fs_enable_checkpoint(sbi); 2990 if (err) 2991 goto restore_discard; 2992 need_disable_checkpoint = true; 2993 } 2994 } 2995 2996 /* 2997 * Place this routine at the end, since a new checkpoint would be 2998 * triggered during remount and we need to take care of it before 2999 * returning from remount. 3000 */ 3001 if ((flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) || 3002 !test_opt(sbi, MERGE_CHECKPOINT)) { 3003 f2fs_stop_ckpt_thread(sbi); 3004 } else { 3005 /* Flush the previous checkpoint, if it exists. */ 3006 f2fs_flush_ckpt_thread(sbi); 3007 3008 err = f2fs_start_ckpt_thread(sbi); 3009 if (err) { 3010 f2fs_err(sbi, 3011 "Failed to start F2FS issue_checkpoint_thread (%d)", 3012 err); 3013 goto restore_checkpoint; 3014 } 3015 } 3016 3017 skip: 3018 #ifdef CONFIG_QUOTA 3019 /* Release old quota file names */ 3020 for (i = 0; i < MAXQUOTAS; i++) 3021 kfree(org_mount_opt.s_qf_names[i]); 3022 #endif 3023 /* Update the POSIXACL Flag */ 3024 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | 3025 (test_opt(sbi, POSIX_ACL) ?
SB_POSIXACL : 0); 3026 3027 limit_reserve_root(sbi); 3028 fc->sb_flags = (flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME); 3029 3030 sbi->umount_lock_holder = NULL; 3031 return 0; 3032 restore_checkpoint: 3033 if (need_enable_checkpoint) { 3034 if (f2fs_enable_checkpoint(sbi)) 3035 f2fs_warn(sbi, "checkpoint has not been enabled"); 3036 } else if (need_disable_checkpoint) { 3037 if (f2fs_disable_checkpoint(sbi)) 3038 f2fs_warn(sbi, "checkpoint has not been disabled"); 3039 } 3040 restore_discard: 3041 if (need_restart_discard) { 3042 if (f2fs_start_discard_thread(sbi)) 3043 f2fs_warn(sbi, "discard has been stopped"); 3044 } else if (need_stop_discard) { 3045 f2fs_stop_discard_thread(sbi); 3046 } 3047 restore_flush: 3048 if (need_restart_flush) { 3049 if (f2fs_create_flush_cmd_control(sbi)) 3050 f2fs_warn(sbi, "background flush thread has stopped"); 3051 } else if (need_stop_flush) { 3052 clear_opt(sbi, FLUSH_MERGE); 3053 f2fs_destroy_flush_cmd_control(sbi, false); 3054 } 3055 restore_gc: 3056 if (need_restart_gc) { 3057 if (f2fs_start_gc_thread(sbi)) 3058 f2fs_warn(sbi, "background gc thread has stopped"); 3059 } else if (need_stop_gc) { 3060 f2fs_stop_gc_thread(sbi); 3061 } 3062 restore_opts: 3063 #ifdef CONFIG_QUOTA 3064 F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt; 3065 for (i = 0; i < MAXQUOTAS; i++) { 3066 kfree(F2FS_OPTION(sbi).s_qf_names[i]); 3067 F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i]; 3068 } 3069 #endif 3070 sbi->mount_opt = org_mount_opt; 3071 sb->s_flags = old_sb_flags; 3072 3073 sbi->umount_lock_holder = NULL; 3074 return err; 3075 } 3076 3077 static void f2fs_shutdown(struct super_block *sb) 3078 { 3079 f2fs_do_shutdown(F2FS_SB(sb), F2FS_GOING_DOWN_NOSYNC, false, false); 3080 } 3081 3082 #ifdef CONFIG_QUOTA 3083 static bool f2fs_need_recovery(struct f2fs_sb_info *sbi) 3084 { 3085 /* need to recover orphans */ 3086 if (is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG)) 3087 return true; 3088 /* need to recover data */ 3089 if (test_opt(sbi, DISABLE_ROLL_FORWARD)) 3090 return false; 3091 if (test_opt(sbi, NORECOVERY)) 3092 return false; 3093 return !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG); 3094 } 3095 3096 static bool f2fs_recover_quota_begin(struct f2fs_sb_info *sbi) 3097 { 3098 bool readonly = f2fs_readonly(sbi->sb); 3099 3100 if (!f2fs_need_recovery(sbi)) 3101 return false; 3102 3103 /* no need to check f2fs_sb_has_readonly() */ 3104 if (f2fs_hw_is_readonly(sbi)) 3105 return false; 3106 3107 if (readonly) { 3108 sbi->sb->s_flags &= ~SB_RDONLY; 3109 set_sbi_flag(sbi, SBI_IS_WRITABLE); 3110 } 3111 3112 /* 3113 * Turn on quotas which were not enabled for read-only mounts if the 3114 * filesystem has the quota feature, so that they are updated correctly.
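 * The return value tells f2fs_recover_quota_end() whether quotas were enabled here and therefore must be turned off again.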
3115 */ 3116 return f2fs_enable_quota_files(sbi, readonly); 3117 } 3118 3119 static void f2fs_recover_quota_end(struct f2fs_sb_info *sbi, 3120 bool quota_enabled) 3121 { 3122 if (quota_enabled) 3123 f2fs_quota_off_umount(sbi->sb); 3124 3125 if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE)) { 3126 clear_sbi_flag(sbi, SBI_IS_WRITABLE); 3127 sbi->sb->s_flags |= SB_RDONLY; 3128 } 3129 } 3130 3131 /* Read data from quotafile */ 3132 static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data, 3133 size_t len, loff_t off) 3134 { 3135 struct inode *inode = sb_dqopt(sb)->files[type]; 3136 struct address_space *mapping = inode->i_mapping; 3137 int tocopy; 3138 size_t toread; 3139 loff_t i_size = i_size_read(inode); 3140 3141 if (off > i_size) 3142 return 0; 3143 3144 if (off + len > i_size) 3145 len = i_size - off; 3146 toread = len; 3147 while (toread > 0) { 3148 struct folio *folio; 3149 size_t offset; 3150 3151 repeat: 3152 folio = mapping_read_folio_gfp(mapping, off >> PAGE_SHIFT, 3153 GFP_NOFS); 3154 if (IS_ERR(folio)) { 3155 if (PTR_ERR(folio) == -ENOMEM) { 3156 memalloc_retry_wait(GFP_NOFS); 3157 goto repeat; 3158 } 3159 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR); 3160 return PTR_ERR(folio); 3161 } 3162 offset = offset_in_folio(folio, off); 3163 tocopy = min(folio_size(folio) - offset, toread); 3164 3165 folio_lock(folio); 3166 3167 if (unlikely(folio->mapping != mapping)) { 3168 f2fs_folio_put(folio, true); 3169 goto repeat; 3170 } 3171 3172 /* 3173 * should never happen, just leave f2fs_bug_on() here to catch 3174 * any potential bug. 3175 */ 3176 f2fs_bug_on(F2FS_SB(sb), !folio_test_uptodate(folio)); 3177 3178 memcpy_from_folio(data, folio, offset, tocopy); 3179 f2fs_folio_put(folio, true); 3180 3181 toread -= tocopy; 3182 data += tocopy; 3183 off += tocopy; 3184 } 3185 return len; 3186 } 3187 3188 /* Write to quotafile */ 3189 static ssize_t f2fs_quota_write(struct super_block *sb, int type, 3190 const char *data, size_t len, loff_t off) 3191 { 3192 struct inode *inode = sb_dqopt(sb)->files[type]; 3193 struct address_space *mapping = inode->i_mapping; 3194 const struct address_space_operations *a_ops = mapping->a_ops; 3195 int offset = off & (sb->s_blocksize - 1); 3196 size_t towrite = len; 3197 struct folio *folio; 3198 void *fsdata = NULL; 3199 int err = 0; 3200 int tocopy; 3201 3202 while (towrite > 0) { 3203 tocopy = min_t(unsigned long, sb->s_blocksize - offset, 3204 towrite); 3205 retry: 3206 err = a_ops->write_begin(NULL, mapping, off, tocopy, 3207 &folio, &fsdata); 3208 if (unlikely(err)) { 3209 if (err == -ENOMEM) { 3210 memalloc_retry_wait(GFP_NOFS); 3211 goto retry; 3212 } 3213 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR); 3214 break; 3215 } 3216 3217 memcpy_to_folio(folio, offset_in_folio(folio, off), data, tocopy); 3218 3219 a_ops->write_end(NULL, mapping, off, tocopy, tocopy, 3220 folio, fsdata); 3221 offset = 0; 3222 towrite -= tocopy; 3223 off += tocopy; 3224 data += tocopy; 3225 cond_resched(); 3226 } 3227 3228 if (len == towrite) 3229 return err; 3230 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); 3231 f2fs_mark_inode_dirty_sync(inode, false); 3232 return len - towrite; 3233 } 3234 3235 int f2fs_dquot_initialize(struct inode *inode) 3236 { 3237 if (time_to_inject(F2FS_I_SB(inode), FAULT_DQUOT_INIT)) 3238 return -ESRCH; 3239 3240 return dquot_initialize(inode); 3241 } 3242 3243 static struct dquot __rcu **f2fs_get_dquots(struct inode *inode) 3244 { 3245 return F2FS_I(inode)->i_dquot; 3246 } 3247 3248 static qsize_t 
*f2fs_get_reserved_space(struct inode *inode) 3249 { 3250 return &F2FS_I(inode)->i_reserved_quota; 3251 } 3252 3253 static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type) 3254 { 3255 if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) { 3256 f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it"); 3257 return 0; 3258 } 3259 3260 return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type], 3261 F2FS_OPTION(sbi).s_jquota_fmt, type); 3262 } 3263 3264 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly) 3265 { 3266 int enabled = 0; 3267 int i, err; 3268 3269 if (f2fs_sb_has_quota_ino(sbi) && rdonly) { 3270 err = f2fs_enable_quotas(sbi->sb); 3271 if (err) { 3272 f2fs_err(sbi, "Cannot turn on quota_ino: %d", err); 3273 return 0; 3274 } 3275 return 1; 3276 } 3277 3278 for (i = 0; i < MAXQUOTAS; i++) { 3279 if (F2FS_OPTION(sbi).s_qf_names[i]) { 3280 err = f2fs_quota_on_mount(sbi, i); 3281 if (!err) { 3282 enabled = 1; 3283 continue; 3284 } 3285 f2fs_err(sbi, "Cannot turn on quotas: %d on %d", 3286 err, i); 3287 } 3288 } 3289 return enabled; 3290 } 3291 3292 static int f2fs_quota_enable(struct super_block *sb, int type, int format_id, 3293 unsigned int flags, unsigned long qf_inum) 3294 { 3295 struct inode *qf_inode; 3296 unsigned long qf_flag = F2FS_QUOTA_DEFAULT_FL; 3297 int err; 3298 3299 qf_inode = f2fs_iget(sb, qf_inum); 3300 if (IS_ERR(qf_inode)) { 3301 f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum); 3302 return PTR_ERR(qf_inode); 3303 } 3304 3305 /* Don't account quota for quota files to avoid recursion */ 3306 inode_lock(qf_inode); 3307 qf_inode->i_flags |= S_NOQUOTA; 3308 3309 if ((F2FS_I(qf_inode)->i_flags & qf_flag) != qf_flag) { 3310 F2FS_I(qf_inode)->i_flags |= qf_flag; 3311 f2fs_set_inode_flags(qf_inode); 3312 } 3313 inode_unlock(qf_inode); 3314 3315 err = dquot_load_quota_inode(qf_inode, type, format_id, flags); 3316 iput(qf_inode); 3317 return err; 3318 } 3319 3320 static int f2fs_enable_quotas(struct super_block *sb) 3321 { 3322 struct f2fs_sb_info *sbi = F2FS_SB(sb); 3323 int type, err = 0; 3324 unsigned long qf_inum; 3325 bool quota_mopt[MAXQUOTAS] = { 3326 test_opt(sbi, USRQUOTA), 3327 test_opt(sbi, GRPQUOTA), 3328 test_opt(sbi, PRJQUOTA), 3329 }; 3330 3331 if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) { 3332 f2fs_err(sbi, "quota file may be corrupted, skip loading it"); 3333 return 0; 3334 } 3335 3336 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE; 3337 3338 for (type = 0; type < MAXQUOTAS; type++) { 3339 qf_inum = f2fs_qf_ino(sb, type); 3340 if (qf_inum) { 3341 err = f2fs_quota_enable(sb, type, QFMT_VFS_V1, 3342 DQUOT_USAGE_ENABLED | 3343 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0), qf_inum); 3344 if (err) { 3345 f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). 
Please run fsck to fix.", 3346 type, err); 3347 for (type--; type >= 0; type--) 3348 dquot_quota_off(sb, type); 3349 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); 3350 return err; 3351 } 3352 } 3353 } 3354 return 0; 3355 } 3356 3357 static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type) 3358 { 3359 struct quota_info *dqopt = sb_dqopt(sbi->sb); 3360 struct address_space *mapping = dqopt->files[type]->i_mapping; 3361 int ret = 0; 3362 3363 ret = dquot_writeback_dquots(sbi->sb, type); 3364 if (ret) 3365 goto out; 3366 3367 ret = filemap_fdatawrite(mapping); 3368 if (ret) 3369 goto out; 3370 3371 /* if we are using journalled quota */ 3372 if (is_journalled_quota(sbi)) 3373 goto out; 3374 3375 ret = filemap_fdatawait(mapping); 3376 3377 truncate_inode_pages(&dqopt->files[type]->i_data, 0); 3378 out: 3379 if (ret) 3380 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); 3381 return ret; 3382 } 3383 3384 int f2fs_do_quota_sync(struct super_block *sb, int type) 3385 { 3386 struct f2fs_sb_info *sbi = F2FS_SB(sb); 3387 struct quota_info *dqopt = sb_dqopt(sb); 3388 int cnt; 3389 int ret = 0; 3390 3391 /* 3392 * Now when everything is written we can discard the pagecache so 3393 * that userspace sees the changes. 3394 */ 3395 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 3396 struct f2fs_lock_context lc; 3397 3398 if (type != -1 && cnt != type) 3399 continue; 3400 3401 if (!sb_has_quota_active(sb, cnt)) 3402 continue; 3403 3404 if (!f2fs_sb_has_quota_ino(sbi)) 3405 inode_lock(dqopt->files[cnt]); 3406 3407 /* 3408 * do_quotactl 3409 * f2fs_quota_sync 3410 * f2fs_down_read(quota_sem) 3411 * dquot_writeback_dquots() 3412 * f2fs_dquot_commit 3413 * block_operation 3414 * f2fs_down_read(quota_sem) 3415 */ 3416 f2fs_lock_op(sbi, &lc); 3417 f2fs_down_read(&sbi->quota_sem); 3418 3419 ret = f2fs_quota_sync_file(sbi, cnt); 3420 3421 f2fs_up_read(&sbi->quota_sem); 3422 f2fs_unlock_op(sbi, &lc); 3423 3424 if (!f2fs_sb_has_quota_ino(sbi)) 3425 inode_unlock(dqopt->files[cnt]); 3426 3427 if (ret) 3428 break; 3429 } 3430 return ret; 3431 } 3432 3433 static int f2fs_quota_sync(struct super_block *sb, int type) 3434 { 3435 int ret; 3436 3437 F2FS_SB(sb)->umount_lock_holder = current; 3438 ret = f2fs_do_quota_sync(sb, type); 3439 F2FS_SB(sb)->umount_lock_holder = NULL; 3440 return ret; 3441 } 3442 3443 static int f2fs_quota_on(struct super_block *sb, int type, int format_id, 3444 const struct path *path) 3445 { 3446 struct inode *inode; 3447 int err = 0; 3448 3449 /* if quota sysfile exists, deny enabling quota with specific file */ 3450 if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) { 3451 f2fs_err(F2FS_SB(sb), "quota sysfile already exists"); 3452 return -EBUSY; 3453 } 3454 3455 if (path->dentry->d_sb != sb) 3456 return -EXDEV; 3457 3458 F2FS_SB(sb)->umount_lock_holder = current; 3459 3460 err = f2fs_do_quota_sync(sb, type); 3461 if (err) 3462 goto out; 3463 3464 inode = d_inode(path->dentry); 3465 3466 err = filemap_fdatawrite(inode->i_mapping); 3467 if (err) 3468 goto out; 3469 3470 err = filemap_fdatawait(inode->i_mapping); 3471 if (err) 3472 goto out; 3473 3474 err = dquot_quota_on(sb, type, format_id, path); 3475 if (err) 3476 goto out; 3477 3478 inode_lock(inode); 3479 F2FS_I(inode)->i_flags |= F2FS_QUOTA_DEFAULT_FL; 3480 f2fs_set_inode_flags(inode); 3481 inode_unlock(inode); 3482 f2fs_mark_inode_dirty_sync(inode, false); 3483 out: 3484 F2FS_SB(sb)->umount_lock_holder = NULL; 3485 return err; 3486 } 3487 3488 static int __f2fs_quota_off(struct super_block *sb, int type) 3489 { 3490 struct inode *inode = 
sb_dqopt(sb)->files[type]; 3491 int err; 3492 3493 if (!inode || !igrab(inode)) 3494 return dquot_quota_off(sb, type); 3495 3496 err = f2fs_do_quota_sync(sb, type); 3497 if (err) 3498 goto out_put; 3499 3500 err = dquot_quota_off(sb, type); 3501 if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb))) 3502 goto out_put; 3503 3504 inode_lock(inode); 3505 F2FS_I(inode)->i_flags &= ~F2FS_QUOTA_DEFAULT_FL; 3506 f2fs_set_inode_flags(inode); 3507 inode_unlock(inode); 3508 f2fs_mark_inode_dirty_sync(inode, false); 3509 out_put: 3510 iput(inode); 3511 return err; 3512 } 3513 3514 static int f2fs_quota_off(struct super_block *sb, int type) 3515 { 3516 struct f2fs_sb_info *sbi = F2FS_SB(sb); 3517 int err; 3518 3519 F2FS_SB(sb)->umount_lock_holder = current; 3520 3521 err = __f2fs_quota_off(sb, type); 3522 3523 /* 3524 * quotactl can shut down journalled quota, resulting in inconsistency 3525 * between quota records and fs data due to subsequent updates; tag the 3526 * flag to let fsck be aware of it. 3527 */ 3528 if (is_journalled_quota(sbi)) 3529 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); 3530 3531 F2FS_SB(sb)->umount_lock_holder = NULL; 3532 3533 return err; 3534 } 3535 3536 void f2fs_quota_off_umount(struct super_block *sb) 3537 { 3538 int type; 3539 int err; 3540 3541 for (type = 0; type < MAXQUOTAS; type++) { 3542 err = __f2fs_quota_off(sb, type); 3543 if (err) { 3544 int ret = dquot_quota_off(sb, type); 3545 3546 f2fs_err(F2FS_SB(sb), "Failed to turn off disk quota (type: %d, err: %d, ret: %d). Please run fsck to fix it.", 3547 type, err, ret); 3548 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR); 3549 } 3550 } 3551 /* 3552 * In case of checkpoint=disable, we must flush quota blocks. 3553 * This can cause a NULL dereference for node_inode in end_io, since 3554 * put_super already dropped it.
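 * Hence sync_filesystem() below flushes them while node_inode is still valid.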
3555 */ 3556 sync_filesystem(sb); 3557 } 3558 3559 static void f2fs_truncate_quota_inode_pages(struct super_block *sb) 3560 { 3561 struct quota_info *dqopt = sb_dqopt(sb); 3562 int type; 3563 3564 for (type = 0; type < MAXQUOTAS; type++) { 3565 if (!dqopt->files[type]) 3566 continue; 3567 f2fs_inode_synced(dqopt->files[type]); 3568 } 3569 } 3570 3571 static int f2fs_dquot_commit(struct dquot *dquot) 3572 { 3573 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); 3574 int ret; 3575 3576 f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING); 3577 ret = dquot_commit(dquot); 3578 if (ret < 0) 3579 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); 3580 f2fs_up_read(&sbi->quota_sem); 3581 return ret; 3582 } 3583 3584 static int f2fs_dquot_acquire(struct dquot *dquot) 3585 { 3586 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); 3587 int ret; 3588 3589 f2fs_down_read(&sbi->quota_sem); 3590 ret = dquot_acquire(dquot); 3591 if (ret < 0) 3592 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); 3593 f2fs_up_read(&sbi->quota_sem); 3594 return ret; 3595 } 3596 3597 static int f2fs_dquot_release(struct dquot *dquot) 3598 { 3599 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); 3600 int ret = dquot_release(dquot); 3601 3602 if (ret < 0) 3603 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); 3604 return ret; 3605 } 3606 3607 static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot) 3608 { 3609 struct super_block *sb = dquot->dq_sb; 3610 struct f2fs_sb_info *sbi = F2FS_SB(sb); 3611 int ret = dquot_mark_dquot_dirty(dquot); 3612 3613 /* if we are using journalled quota */ 3614 if (is_journalled_quota(sbi)) 3615 set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH); 3616 3617 return ret; 3618 } 3619 3620 static int f2fs_dquot_commit_info(struct super_block *sb, int type) 3621 { 3622 struct f2fs_sb_info *sbi = F2FS_SB(sb); 3623 int ret = dquot_commit_info(sb, type); 3624 3625 if (ret < 0) 3626 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); 3627 return ret; 3628 } 3629 3630 static int f2fs_get_projid(struct inode *inode, kprojid_t *projid) 3631 { 3632 *projid = F2FS_I(inode)->i_projid; 3633 return 0; 3634 } 3635 3636 static const struct dquot_operations f2fs_quota_operations = { 3637 .get_reserved_space = f2fs_get_reserved_space, 3638 .write_dquot = f2fs_dquot_commit, 3639 .acquire_dquot = f2fs_dquot_acquire, 3640 .release_dquot = f2fs_dquot_release, 3641 .mark_dirty = f2fs_dquot_mark_dquot_dirty, 3642 .write_info = f2fs_dquot_commit_info, 3643 .alloc_dquot = dquot_alloc, 3644 .destroy_dquot = dquot_destroy, 3645 .get_projid = f2fs_get_projid, 3646 .get_next_id = dquot_get_next_id, 3647 }; 3648 3649 static const struct quotactl_ops f2fs_quotactl_ops = { 3650 .quota_on = f2fs_quota_on, 3651 .quota_off = f2fs_quota_off, 3652 .quota_sync = f2fs_quota_sync, 3653 .get_state = dquot_get_state, 3654 .set_info = dquot_set_dqinfo, 3655 .get_dqblk = dquot_get_dqblk, 3656 .set_dqblk = dquot_set_dqblk, 3657 .get_nextdqblk = dquot_get_next_dqblk, 3658 }; 3659 #else 3660 int f2fs_dquot_initialize(struct inode *inode) 3661 { 3662 return 0; 3663 } 3664 3665 int f2fs_do_quota_sync(struct super_block *sb, int type) 3666 { 3667 return 0; 3668 } 3669 3670 void f2fs_quota_off_umount(struct super_block *sb) 3671 { 3672 } 3673 #endif 3674 3675 static const struct super_operations f2fs_sops = { 3676 .alloc_inode = f2fs_alloc_inode, 3677 .free_inode = f2fs_free_inode, 3678 .drop_inode = f2fs_drop_inode, 3679 .write_inode = f2fs_write_inode, 3680 .dirty_inode = f2fs_dirty_inode, 3681 .show_options = f2fs_show_options, 3682 #ifdef CONFIG_QUOTA 3683 .quota_read = 
f2fs_quota_read, 3684 .quota_write = f2fs_quota_write, 3685 .get_dquots = f2fs_get_dquots, 3686 #endif 3687 .evict_inode = f2fs_evict_inode, 3688 .put_super = f2fs_put_super, 3689 .sync_fs = f2fs_sync_fs, 3690 .freeze_fs = f2fs_freeze, 3691 .unfreeze_fs = f2fs_unfreeze, 3692 .statfs = f2fs_statfs, 3693 .shutdown = f2fs_shutdown, 3694 }; 3695 3696 #ifdef CONFIG_FS_ENCRYPTION 3697 static int f2fs_get_context(struct inode *inode, void *ctx, size_t len) 3698 { 3699 return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION, 3700 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT, 3701 ctx, len, NULL); 3702 } 3703 3704 static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len, 3705 void *fs_data) 3706 { 3707 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3708 3709 /* 3710 * Encrypting the root directory is not allowed because fsck 3711 * expects lost+found directory to exist and remain unencrypted 3712 * if LOST_FOUND feature is enabled. 3713 * 3714 */ 3715 if (f2fs_sb_has_lost_found(sbi) && 3716 inode->i_ino == F2FS_ROOT_INO(sbi)) 3717 return -EPERM; 3718 3719 return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION, 3720 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT, 3721 ctx, len, fs_data, XATTR_CREATE); 3722 } 3723 3724 static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb) 3725 { 3726 return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy; 3727 } 3728 3729 static bool f2fs_has_stable_inodes(struct super_block *sb) 3730 { 3731 return true; 3732 } 3733 3734 static struct block_device **f2fs_get_devices(struct super_block *sb, 3735 unsigned int *num_devs) 3736 { 3737 struct f2fs_sb_info *sbi = F2FS_SB(sb); 3738 struct block_device **devs; 3739 int i; 3740 3741 if (!f2fs_is_multi_device(sbi)) 3742 return NULL; 3743 3744 devs = kmalloc_objs(*devs, sbi->s_ndevs); 3745 if (!devs) 3746 return ERR_PTR(-ENOMEM); 3747 3748 for (i = 0; i < sbi->s_ndevs; i++) 3749 devs[i] = FDEV(i).bdev; 3750 *num_devs = sbi->s_ndevs; 3751 return devs; 3752 } 3753 3754 static const struct fscrypt_operations f2fs_cryptops = { 3755 .inode_info_offs = (int)offsetof(struct f2fs_inode_info, i_crypt_info) - 3756 (int)offsetof(struct f2fs_inode_info, vfs_inode), 3757 .needs_bounce_pages = 1, 3758 .has_32bit_inodes = 1, 3759 .supports_subblock_data_units = 1, 3760 .legacy_key_prefix = "f2fs:", 3761 .get_context = f2fs_get_context, 3762 .set_context = f2fs_set_context, 3763 .get_dummy_policy = f2fs_get_dummy_policy, 3764 .empty_dir = f2fs_empty_dir, 3765 .has_stable_inodes = f2fs_has_stable_inodes, 3766 .get_devices = f2fs_get_devices, 3767 }; 3768 #endif /* CONFIG_FS_ENCRYPTION */ 3769 3770 static struct inode *f2fs_nfs_get_inode(struct super_block *sb, 3771 u64 ino, u32 generation) 3772 { 3773 struct f2fs_sb_info *sbi = F2FS_SB(sb); 3774 struct inode *inode; 3775 3776 if (f2fs_check_nid_range(sbi, ino)) 3777 return ERR_PTR(-ESTALE); 3778 3779 /* 3780 * f2fs_iget isn't quite right if the inode is currently unallocated! 3781 * However f2fs_iget currently does appropriate checks to handle stale 3782 * inodes so everything is OK. 3783 */ 3784 inode = f2fs_iget(sb, ino); 3785 if (IS_ERR(inode)) 3786 return ERR_CAST(inode); 3787 if (unlikely(generation && inode->i_generation != generation)) { 3788 /* we didn't find the right inode.. 
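 * the generation recorded in the file handle no longer matches.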
*/ 3789 iput(inode); 3790 return ERR_PTR(-ESTALE); 3791 } 3792 return inode; 3793 } 3794 3795 static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid, 3796 int fh_len, int fh_type) 3797 { 3798 return generic_fh_to_dentry(sb, fid, fh_len, fh_type, 3799 f2fs_nfs_get_inode); 3800 } 3801 3802 static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid, 3803 int fh_len, int fh_type) 3804 { 3805 return generic_fh_to_parent(sb, fid, fh_len, fh_type, 3806 f2fs_nfs_get_inode); 3807 } 3808 3809 static const struct export_operations f2fs_export_ops = { 3810 .encode_fh = generic_encode_ino32_fh, 3811 .fh_to_dentry = f2fs_fh_to_dentry, 3812 .fh_to_parent = f2fs_fh_to_parent, 3813 .get_parent = f2fs_get_parent, 3814 }; 3815 3816 loff_t max_file_blocks(struct inode *inode) 3817 { 3818 loff_t result = 0; 3819 loff_t leaf_count; 3820 3821 /* 3822 * note: previously, result was equal to (DEF_ADDRS_PER_INODE - 3823 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more 3824 * space in inode.i_addr, so it is safer to start result 3825 * from zero. 3826 */ 3827 3828 if (inode && f2fs_compressed_file(inode)) 3829 leaf_count = ADDRS_PER_BLOCK(inode); 3830 else 3831 leaf_count = DEF_ADDRS_PER_BLOCK; 3832 3833 /* two direct node blocks */ 3834 result += (leaf_count * 2); 3835 3836 /* two indirect node blocks */ 3837 leaf_count *= NIDS_PER_BLOCK; 3838 result += (leaf_count * 2); 3839 3840 /* one double indirect node block */ 3841 leaf_count *= NIDS_PER_BLOCK; 3842 result += leaf_count; 3843 3844 /* 3845 * For compatibility with FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{64,32} with 3846 * a 4K crypto data unit, we must restrict the max filesize to what can 3847 * fit within U32_MAX + 1 data units. 3848 */ 3849 3850 result = umin(result, F2FS_BYTES_TO_BLK(((loff_t)U32_MAX + 1) * 4096)); 3851 3852 return result; 3853 } 3854 3855 static int __f2fs_commit_super(struct f2fs_sb_info *sbi, struct folio *folio, 3856 pgoff_t index, bool update) 3857 { 3858 struct bio *bio; 3859 /* it's a rare case, so we can do FUA all the time */ 3860 blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA; 3861 int ret; 3862 3863 folio_lock(folio); 3864 folio_wait_writeback(folio); 3865 if (update) 3866 memcpy(F2FS_SUPER_BLOCK(folio, index), F2FS_RAW_SUPER(sbi), 3867 sizeof(struct f2fs_super_block)); 3868 folio_mark_dirty(folio); 3869 folio_clear_dirty_for_io(folio); 3870 folio_start_writeback(folio); 3871 folio_unlock(folio); 3872 3873 bio = bio_alloc(sbi->sb->s_bdev, 1, opf, GFP_NOFS); 3874 3875 /* no need to set a crypto context for a superblock update */ 3876 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(folio->index); 3877 3878 if (!bio_add_folio(bio, folio, folio_size(folio), 0)) 3879 f2fs_bug_on(sbi, 1); 3880 3881 ret = submit_bio_wait(bio); 3882 bio_put(bio); 3883 folio_end_writeback(folio); 3884 3885 return ret; 3886 } 3887 3888 static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi, 3889 struct folio *folio, pgoff_t index) 3890 { 3891 struct f2fs_super_block *raw_super = F2FS_SUPER_BLOCK(folio, index); 3892 struct super_block *sb = sbi->sb; 3893 u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); 3894 u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr); 3895 u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr); 3896 u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr); 3897 u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); 3898 u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); 3899 u32 segment_count_ckpt =
le32_to_cpu(raw_super->segment_count_ckpt); 3900 u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit); 3901 u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat); 3902 u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa); 3903 u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main); 3904 u32 segment_count = le32_to_cpu(raw_super->segment_count); 3905 u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); 3906 u64 main_end_blkaddr = main_blkaddr + 3907 ((u64)segment_count_main << log_blocks_per_seg); 3908 u64 seg_end_blkaddr = segment0_blkaddr + 3909 ((u64)segment_count << log_blocks_per_seg); 3910 3911 if (segment0_blkaddr != cp_blkaddr) { 3912 f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)", 3913 segment0_blkaddr, cp_blkaddr); 3914 return true; 3915 } 3916 3917 if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) != 3918 sit_blkaddr) { 3919 f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)", 3920 cp_blkaddr, sit_blkaddr, 3921 segment_count_ckpt << log_blocks_per_seg); 3922 return true; 3923 } 3924 3925 if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) != 3926 nat_blkaddr) { 3927 f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)", 3928 sit_blkaddr, nat_blkaddr, 3929 segment_count_sit << log_blocks_per_seg); 3930 return true; 3931 } 3932 3933 if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) != 3934 ssa_blkaddr) { 3935 f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)", 3936 nat_blkaddr, ssa_blkaddr, 3937 segment_count_nat << log_blocks_per_seg); 3938 return true; 3939 } 3940 3941 if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) != 3942 main_blkaddr) { 3943 f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)", 3944 ssa_blkaddr, main_blkaddr, 3945 segment_count_ssa << log_blocks_per_seg); 3946 return true; 3947 } 3948 3949 if (main_end_blkaddr > seg_end_blkaddr) { 3950 f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)", 3951 main_blkaddr, seg_end_blkaddr, 3952 segment_count_main << log_blocks_per_seg); 3953 return true; 3954 } else if (main_end_blkaddr < seg_end_blkaddr) { 3955 int err = 0; 3956 char *res; 3957 3958 /* fix in-memory information all the time */ 3959 raw_super->segment_count = cpu_to_le32((main_end_blkaddr - 3960 segment0_blkaddr) >> log_blocks_per_seg); 3961 3962 if (f2fs_readonly(sb) || f2fs_hw_is_readonly(sbi)) { 3963 set_sbi_flag(sbi, SBI_NEED_SB_WRITE); 3964 res = "internally"; 3965 } else { 3966 err = __f2fs_commit_super(sbi, folio, index, false); 3967 res = err ? 
"failed" : "done"; 3968 } 3969 f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)", 3970 res, main_blkaddr, seg_end_blkaddr, 3971 segment_count_main << log_blocks_per_seg); 3972 if (err) 3973 return true; 3974 } 3975 return false; 3976 } 3977 3978 static int sanity_check_raw_super(struct f2fs_sb_info *sbi, 3979 struct folio *folio, pgoff_t index) 3980 { 3981 block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main; 3982 block_t total_sections, blocks_per_seg; 3983 struct f2fs_super_block *raw_super = F2FS_SUPER_BLOCK(folio, index); 3984 size_t crc_offset = 0; 3985 __u32 crc = 0; 3986 3987 if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) { 3988 f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)", 3989 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic)); 3990 return -EINVAL; 3991 } 3992 3993 /* Check checksum_offset and crc in superblock */ 3994 if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) { 3995 crc_offset = le32_to_cpu(raw_super->checksum_offset); 3996 if (crc_offset != 3997 offsetof(struct f2fs_super_block, crc)) { 3998 f2fs_info(sbi, "Invalid SB checksum offset: %zu", 3999 crc_offset); 4000 return -EFSCORRUPTED; 4001 } 4002 crc = le32_to_cpu(raw_super->crc); 4003 if (crc != f2fs_crc32(raw_super, crc_offset)) { 4004 f2fs_info(sbi, "Invalid SB checksum value: %u", crc); 4005 return -EFSCORRUPTED; 4006 } 4007 } 4008 4009 /* only support block_size equals to PAGE_SIZE */ 4010 if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) { 4011 f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u", 4012 le32_to_cpu(raw_super->log_blocksize), 4013 F2FS_BLKSIZE_BITS); 4014 return -EFSCORRUPTED; 4015 } 4016 4017 /* check log blocks per segment */ 4018 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) { 4019 f2fs_info(sbi, "Invalid log blocks per segment (%u)", 4020 le32_to_cpu(raw_super->log_blocks_per_seg)); 4021 return -EFSCORRUPTED; 4022 } 4023 4024 /* Currently, support 512/1024/2048/4096/16K bytes sector size */ 4025 if (le32_to_cpu(raw_super->log_sectorsize) > 4026 F2FS_MAX_LOG_SECTOR_SIZE || 4027 le32_to_cpu(raw_super->log_sectorsize) < 4028 F2FS_MIN_LOG_SECTOR_SIZE) { 4029 f2fs_info(sbi, "Invalid log sectorsize (%u)", 4030 le32_to_cpu(raw_super->log_sectorsize)); 4031 return -EFSCORRUPTED; 4032 } 4033 if (le32_to_cpu(raw_super->log_sectors_per_block) + 4034 le32_to_cpu(raw_super->log_sectorsize) != 4035 F2FS_MAX_LOG_SECTOR_SIZE) { 4036 f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)", 4037 le32_to_cpu(raw_super->log_sectors_per_block), 4038 le32_to_cpu(raw_super->log_sectorsize)); 4039 return -EFSCORRUPTED; 4040 } 4041 4042 segment_count = le32_to_cpu(raw_super->segment_count); 4043 segment_count_main = le32_to_cpu(raw_super->segment_count_main); 4044 segs_per_sec = le32_to_cpu(raw_super->segs_per_sec); 4045 secs_per_zone = le32_to_cpu(raw_super->secs_per_zone); 4046 total_sections = le32_to_cpu(raw_super->section_count); 4047 4048 /* blocks_per_seg should be 512, given the above check */ 4049 blocks_per_seg = BIT(le32_to_cpu(raw_super->log_blocks_per_seg)); 4050 4051 if (segment_count > F2FS_MAX_SEGMENT || 4052 segment_count < F2FS_MIN_SEGMENTS) { 4053 f2fs_info(sbi, "Invalid segment count (%u)", segment_count); 4054 return -EFSCORRUPTED; 4055 } 4056 4057 if (total_sections > segment_count_main || total_sections < 1 || 4058 segs_per_sec > segment_count || !segs_per_sec) { 4059 f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)", 4060 segment_count, total_sections, segs_per_sec); 4061 return 
-EFSCORRUPTED; 4062 } 4063 4064 if (segment_count_main != total_sections * segs_per_sec) { 4065 f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)", 4066 segment_count_main, total_sections, segs_per_sec); 4067 return -EFSCORRUPTED; 4068 } 4069 4070 if ((segment_count / segs_per_sec) < total_sections) { 4071 f2fs_info(sbi, "Small segment_count (%u < %u * %u)", 4072 segment_count, segs_per_sec, total_sections); 4073 return -EFSCORRUPTED; 4074 } 4075 4076 if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) { 4077 f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)", 4078 segment_count, le64_to_cpu(raw_super->block_count)); 4079 return -EFSCORRUPTED; 4080 } 4081 4082 if (RDEV(0).path[0]) { 4083 block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments); 4084 int i = 1; 4085 4086 while (i < MAX_DEVICES && RDEV(i).path[0]) { 4087 dev_seg_count += le32_to_cpu(RDEV(i).total_segments); 4088 i++; 4089 } 4090 if (segment_count != dev_seg_count) { 4091 f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)", 4092 segment_count, dev_seg_count); 4093 return -EFSCORRUPTED; 4094 } 4095 } else { 4096 if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) && 4097 !bdev_is_zoned(sbi->sb->s_bdev)) { 4098 f2fs_info(sbi, "Zoned block device path is missing"); 4099 return -EFSCORRUPTED; 4100 } 4101 } 4102 4103 if (secs_per_zone > total_sections || !secs_per_zone) { 4104 f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)", 4105 secs_per_zone, total_sections); 4106 return -EFSCORRUPTED; 4107 } 4108 if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION || 4109 raw_super->hot_ext_count > F2FS_MAX_EXTENSION || 4110 (le32_to_cpu(raw_super->extension_count) + 4111 raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) { 4112 f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)", 4113 le32_to_cpu(raw_super->extension_count), 4114 raw_super->hot_ext_count, 4115 F2FS_MAX_EXTENSION); 4116 return -EFSCORRUPTED; 4117 } 4118 4119 if (le32_to_cpu(raw_super->cp_payload) >= 4120 (blocks_per_seg - F2FS_CP_PACKS - 4121 NR_CURSEG_PERSIST_TYPE)) { 4122 f2fs_info(sbi, "Insane cp_payload (%u >= %u)", 4123 le32_to_cpu(raw_super->cp_payload), 4124 blocks_per_seg - F2FS_CP_PACKS - 4125 NR_CURSEG_PERSIST_TYPE); 4126 return -EFSCORRUPTED; 4127 } 4128 4129 /* check reserved ino info */ 4130 if (le32_to_cpu(raw_super->node_ino) != 1 || 4131 le32_to_cpu(raw_super->meta_ino) != 2 || 4132 le32_to_cpu(raw_super->root_ino) != 3) { 4133 f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)", 4134 le32_to_cpu(raw_super->node_ino), 4135 le32_to_cpu(raw_super->meta_ino), 4136 le32_to_cpu(raw_super->root_ino)); 4137 return -EFSCORRUPTED; 4138 } 4139 4140 /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */ 4141 if (sanity_check_area_boundary(sbi, folio, index)) 4142 return -EFSCORRUPTED; 4143 4144 return 0; 4145 } 4146 4147 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi) 4148 { 4149 unsigned int total, fsmeta; 4150 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 4151 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 4152 unsigned int ovp_segments, reserved_segments; 4153 unsigned int main_segs, blocks_per_seg; 4154 unsigned int sit_segs, nat_segs; 4155 unsigned int sit_bitmap_size, nat_bitmap_size; 4156 unsigned int log_blocks_per_seg; 4157 unsigned int segment_count_main; 4158 unsigned int cp_pack_start_sum, cp_payload; 4159 block_t user_block_count, valid_user_blocks; 4160 block_t avail_node_count, valid_node_count; 4161 unsigned int nat_blocks, 
nat_bits_bytes, nat_bits_blocks; 4162 unsigned int sit_blk_cnt; 4163 int i, j; 4164 4165 total = le32_to_cpu(raw_super->segment_count); 4166 fsmeta = le32_to_cpu(raw_super->segment_count_ckpt); 4167 sit_segs = le32_to_cpu(raw_super->segment_count_sit); 4168 fsmeta += sit_segs; 4169 nat_segs = le32_to_cpu(raw_super->segment_count_nat); 4170 fsmeta += nat_segs; 4171 fsmeta += le32_to_cpu(ckpt->rsvd_segment_count); 4172 fsmeta += le32_to_cpu(raw_super->segment_count_ssa); 4173 4174 if (unlikely(fsmeta >= total)) 4175 return 1; 4176 4177 ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); 4178 reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); 4179 4180 if (!f2fs_sb_has_readonly(sbi) && 4181 unlikely(fsmeta < F2FS_MIN_META_SEGMENTS || 4182 ovp_segments == 0 || reserved_segments == 0)) { 4183 f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version"); 4184 return 1; 4185 } 4186 user_block_count = le64_to_cpu(ckpt->user_block_count); 4187 segment_count_main = le32_to_cpu(raw_super->segment_count_main) + 4188 (f2fs_sb_has_readonly(sbi) ? 1 : 0); 4189 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); 4190 if (!user_block_count || user_block_count >= 4191 segment_count_main << log_blocks_per_seg) { 4192 f2fs_err(sbi, "Wrong user_block_count: %u", 4193 user_block_count); 4194 return 1; 4195 } 4196 4197 valid_user_blocks = le64_to_cpu(ckpt->valid_block_count); 4198 if (valid_user_blocks > user_block_count) { 4199 f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u", 4200 valid_user_blocks, user_block_count); 4201 return 1; 4202 } 4203 4204 valid_node_count = le32_to_cpu(ckpt->valid_node_count); 4205 avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; 4206 if (valid_node_count > avail_node_count) { 4207 f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u", 4208 valid_node_count, avail_node_count); 4209 return 1; 4210 } 4211 4212 main_segs = le32_to_cpu(raw_super->segment_count_main); 4213 blocks_per_seg = BLKS_PER_SEG(sbi); 4214 4215 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) { 4216 if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs || 4217 le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg) 4218 return 1; 4219 4220 if (f2fs_sb_has_readonly(sbi)) 4221 goto check_data; 4222 4223 for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) { 4224 if (le32_to_cpu(ckpt->cur_node_segno[i]) == 4225 le32_to_cpu(ckpt->cur_node_segno[j])) { 4226 f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u", 4227 i, j, 4228 le32_to_cpu(ckpt->cur_node_segno[i])); 4229 return 1; 4230 } 4231 } 4232 } 4233 check_data: 4234 for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) { 4235 if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs || 4236 le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg) 4237 return 1; 4238 4239 if (f2fs_sb_has_readonly(sbi)) 4240 goto skip_cross; 4241 4242 for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) { 4243 if (le32_to_cpu(ckpt->cur_data_segno[i]) == 4244 le32_to_cpu(ckpt->cur_data_segno[j])) { 4245 f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u", 4246 i, j, 4247 le32_to_cpu(ckpt->cur_data_segno[i])); 4248 return 1; 4249 } 4250 } 4251 } 4252 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) { 4253 for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) { 4254 if (le32_to_cpu(ckpt->cur_node_segno[i]) == 4255 le32_to_cpu(ckpt->cur_data_segno[j])) { 4256 f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u", 4257 i, j, 4258 le32_to_cpu(ckpt->cur_node_segno[i])); 4259 return 1; 4260 } 4261 } 4262 } 4263 skip_cross: 4264 
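/* The version bitmap sizing checked below reflects the SIT/NAT layout: each SIT/NAT block has two on-disk copies, so one valid-bit is kept per block for half of the area's segments, i.e. ((segs / 2) << log_blocks_per_seg) / 8 bytes. */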
sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize); 4265 nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize); 4266 4267 if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 || 4268 nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) { 4269 f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat: %u", 4270 sit_bitmap_size, nat_bitmap_size); 4271 return 1; 4272 } 4273 4274 sit_blk_cnt = DIV_ROUND_UP(main_segs, SIT_ENTRY_PER_BLOCK); 4275 if (sit_bitmap_size * 8 < sit_blk_cnt) { 4276 f2fs_err(sbi, "Wrong bitmap size: sit: %u, sit_blk_cnt: %u", 4277 sit_bitmap_size, sit_blk_cnt); 4278 return 1; 4279 } 4280 4281 cp_pack_start_sum = __start_sum_addr(sbi); 4282 cp_payload = __cp_payload(sbi); 4283 if (cp_pack_start_sum < cp_payload + 1 || 4284 cp_pack_start_sum > blocks_per_seg - 1 - 4285 NR_CURSEG_PERSIST_TYPE) { 4286 f2fs_err(sbi, "Wrong cp_pack_start_sum: %u", 4287 cp_pack_start_sum); 4288 return 1; 4289 } 4290 4291 if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) && 4292 le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) { 4293 f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, " 4294 "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, " 4295 "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"", 4296 le32_to_cpu(ckpt->checksum_offset)); 4297 return 1; 4298 } 4299 4300 nat_blocks = nat_segs << log_blocks_per_seg; 4301 nat_bits_bytes = nat_blocks / BITS_PER_BYTE; 4302 nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8); 4303 if (__is_set_ckpt_flags(ckpt, CP_NAT_BITS_FLAG) && 4304 (cp_payload + F2FS_CP_PACKS + 4305 NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) { 4306 f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u", 4307 cp_payload, nat_bits_blocks); 4308 return 1; 4309 } 4310 4311 if (unlikely(f2fs_cp_error(sbi))) { 4312 f2fs_err(sbi, "A bug case: need to run fsck"); 4313 return 1; 4314 } 4315 return 0; 4316 } 4317 4318 static void init_sb_info(struct f2fs_sb_info *sbi) 4319 { 4320 struct f2fs_super_block *raw_super = sbi->raw_super; 4321 int i; 4322 4323 sbi->log_sectors_per_block = 4324 le32_to_cpu(raw_super->log_sectors_per_block); 4325 sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize); 4326 sbi->blocksize = BIT(sbi->log_blocksize); 4327 sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); 4328 sbi->blocks_per_seg = BIT(sbi->log_blocks_per_seg); 4329 sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec); 4330 sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone); 4331 sbi->total_sections = le32_to_cpu(raw_super->section_count); 4332 sbi->total_node_count = SEGS_TO_BLKS(sbi, 4333 ((le32_to_cpu(raw_super->segment_count_nat) / 2) * 4334 NAT_ENTRY_PER_BLOCK)); 4335 sbi->allocate_section_hint = le32_to_cpu(raw_super->section_count); 4336 sbi->allocate_section_policy = ALLOCATE_FORWARD_NOHINT; 4337 F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino); 4338 F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino); 4339 F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino); 4340 sbi->cur_victim_sec = NULL_SECNO; 4341 sbi->gc_mode = GC_NORMAL; 4342 sbi->next_victim_seg[BG_GC] = NULL_SEGNO; 4343 sbi->next_victim_seg[FG_GC] = NULL_SEGNO; 4344 sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH; 4345 sbi->migration_granularity = SEGS_PER_SEC(sbi); 4346 sbi->migration_window_granularity = f2fs_sb_has_blkzoned(sbi) ?
4347 DEF_MIGRATION_WINDOW_GRANULARITY_ZONED : SEGS_PER_SEC(sbi); 4348 sbi->seq_file_ra_mul = MIN_RA_MUL; 4349 sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE; 4350 sbi->max_fragment_hole = DEF_FRAGMENT_SIZE; 4351 spin_lock_init(&sbi->gc_remaining_trials_lock); 4352 atomic64_set(&sbi->current_atomic_write, 0); 4353 sbi->max_lock_elapsed_time = MAX_LOCK_ELAPSED_TIME; 4354 sbi->adjust_lock_priority = 0; 4355 sbi->lock_duration_priority = F2FS_DEFAULT_TASK_PRIORITY; 4356 sbi->critical_task_priority = F2FS_CRITICAL_TASK_PRIORITY; 4357 4358 sbi->sum_blocksize = f2fs_sb_has_packed_ssa(sbi) ? 4359 4096 : sbi->blocksize; 4360 sbi->sums_per_block = sbi->blocksize / sbi->sum_blocksize; 4361 sbi->entries_in_sum = sbi->sum_blocksize / 8; 4362 sbi->sum_entry_size = SUMMARY_SIZE * sbi->entries_in_sum; 4363 sbi->sum_journal_size = sbi->sum_blocksize - SUM_FOOTER_SIZE - 4364 sbi->sum_entry_size; 4365 sbi->nat_journal_entries = (sbi->sum_journal_size - 2) / 4366 sizeof(struct nat_journal_entry); 4367 sbi->sit_journal_entries = (sbi->sum_journal_size - 2) / 4368 sizeof(struct sit_journal_entry); 4369 4370 sbi->dir_level = DEF_DIR_LEVEL; 4371 sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL; 4372 sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL; 4373 sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL; 4374 sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL; 4375 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL; 4376 sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] = 4377 DEF_UMOUNT_DISCARD_TIMEOUT; 4378 clear_sbi_flag(sbi, SBI_NEED_FSCK); 4379 4380 for (i = 0; i < NR_COUNT_TYPE; i++) 4381 atomic_set(&sbi->nr_pages[i], 0); 4382 4383 for (i = 0; i < META; i++) 4384 atomic_set(&sbi->wb_sync_req[i], 0); 4385 4386 INIT_LIST_HEAD(&sbi->s_list); 4387 mutex_init(&sbi->umount_mutex); 4388 init_f2fs_rwsem(&sbi->io_order_lock); 4389 spin_lock_init(&sbi->cp_lock); 4390 4391 sbi->dirty_device = 0; 4392 spin_lock_init(&sbi->dev_lock); 4393 4394 init_f2fs_rwsem(&sbi->sb_lock); 4395 init_f2fs_rwsem(&sbi->pin_sem); 4396 } 4397 4398 static int init_percpu_info(struct f2fs_sb_info *sbi) 4399 { 4400 int err; 4401 4402 err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL); 4403 if (err) 4404 return err; 4405 4406 err = percpu_counter_init(&sbi->rf_node_block_count, 0, GFP_KERNEL); 4407 if (err) 4408 goto err_valid_block; 4409 4410 err = percpu_counter_init(&sbi->total_valid_inode_count, 0, 4411 GFP_KERNEL); 4412 if (err) 4413 goto err_node_block; 4414 return 0; 4415 4416 err_node_block: 4417 percpu_counter_destroy(&sbi->rf_node_block_count); 4418 err_valid_block: 4419 percpu_counter_destroy(&sbi->alloc_valid_block_count); 4420 return err; 4421 } 4422 4423 #ifdef CONFIG_BLK_DEV_ZONED 4424 4425 struct f2fs_report_zones_args { 4426 struct f2fs_sb_info *sbi; 4427 struct f2fs_dev_info *dev; 4428 }; 4429 4430 static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx, 4431 void *data) 4432 { 4433 struct f2fs_report_zones_args *rz_args = data; 4434 block_t unusable_blocks = (zone->len - zone->capacity) >> 4435 F2FS_LOG_SECTORS_PER_BLOCK; 4436 4437 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) 4438 return 0; 4439 4440 set_bit(idx, rz_args->dev->blkz_seq); 4441 if (!rz_args->sbi->unusable_blocks_per_sec) { 4442 rz_args->sbi->unusable_blocks_per_sec = unusable_blocks; 4443 return 0; 4444 } 4445 if (rz_args->sbi->unusable_blocks_per_sec != unusable_blocks) { 4446 f2fs_err(rz_args->sbi, "F2FS supports single zone capacity\n"); 4447 return -EINVAL; 4448 } 4449 return 0; 4450 } 4451 4452 static int init_blkz_info(struct 
f2fs_sb_info *sbi, int devi) 4453 { 4454 struct block_device *bdev = FDEV(devi).bdev; 4455 sector_t nr_sectors = bdev_nr_sectors(bdev); 4456 struct f2fs_report_zones_args rep_zone_arg; 4457 u64 zone_sectors; 4458 unsigned int max_open_zones; 4459 int ret; 4460 4461 if (!f2fs_sb_has_blkzoned(sbi)) 4462 return 0; 4463 4464 if (bdev_is_zoned(FDEV(devi).bdev)) { 4465 max_open_zones = bdev_max_open_zones(bdev); 4466 if (max_open_zones && (max_open_zones < sbi->max_open_zones)) 4467 sbi->max_open_zones = max_open_zones; 4468 if (sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) { 4469 f2fs_err(sbi, 4470 "zoned: max open zones %u is too small, need at least %u open zones", 4471 sbi->max_open_zones, F2FS_OPTION(sbi).active_logs); 4472 return -EINVAL; 4473 } 4474 } 4475 4476 zone_sectors = bdev_zone_sectors(bdev); 4477 if (sbi->blocks_per_blkz && sbi->blocks_per_blkz != 4478 SECTOR_TO_BLOCK(zone_sectors)) 4479 return -EINVAL; 4480 sbi->blocks_per_blkz = SECTOR_TO_BLOCK(zone_sectors); 4481 FDEV(devi).nr_blkz = div_u64(SECTOR_TO_BLOCK(nr_sectors), 4482 sbi->blocks_per_blkz); 4483 if (nr_sectors & (zone_sectors - 1)) 4484 FDEV(devi).nr_blkz++; 4485 4486 FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi, 4487 BITS_TO_LONGS(FDEV(devi).nr_blkz) 4488 * sizeof(unsigned long), 4489 GFP_KERNEL); 4490 if (!FDEV(devi).blkz_seq) 4491 return -ENOMEM; 4492 4493 rep_zone_arg.sbi = sbi; 4494 rep_zone_arg.dev = &FDEV(devi); 4495 4496 ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb, 4497 &rep_zone_arg); 4498 if (ret < 0) 4499 return ret; 4500 return 0; 4501 } 4502 #endif 4503 4504 /* 4505 * Read the f2fs raw super block. 4506 * Since we keep two copies of the super block, read both of them 4507 * to get the first valid one. If either copy is broken, pass a 4508 * recovery flag back to the caller.
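 * The valid copy can then be used to rewrite the broken one;
 * f2fs_fill_super() does this via f2fs_commit_super(sbi, true) when
 * recovery is flagged.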
4509 */ 4510 static int read_raw_super_block(struct f2fs_sb_info *sbi, 4511 struct f2fs_super_block **raw_super, 4512 int *valid_super_block, int *recovery) 4513 { 4514 struct super_block *sb = sbi->sb; 4515 int block; 4516 struct folio *folio; 4517 struct f2fs_super_block *super; 4518 int err = 0; 4519 4520 super = kzalloc_obj(struct f2fs_super_block); 4521 if (!super) 4522 return -ENOMEM; 4523 4524 for (block = 0; block < 2; block++) { 4525 folio = read_mapping_folio(sb->s_bdev->bd_mapping, block, NULL); 4526 if (IS_ERR(folio)) { 4527 f2fs_err(sbi, "Unable to read %dth superblock", 4528 block + 1); 4529 err = PTR_ERR(folio); 4530 *recovery = 1; 4531 continue; 4532 } 4533 4534 /* sanity checking of raw super */ 4535 err = sanity_check_raw_super(sbi, folio, block); 4536 if (err) { 4537 f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock", 4538 block + 1); 4539 folio_put(folio); 4540 *recovery = 1; 4541 continue; 4542 } 4543 4544 if (!*raw_super) { 4545 memcpy(super, F2FS_SUPER_BLOCK(folio, block), 4546 sizeof(*super)); 4547 *valid_super_block = block; 4548 *raw_super = super; 4549 } 4550 folio_put(folio); 4551 } 4552 4553 /* No valid superblock */ 4554 if (!*raw_super) 4555 kfree(super); 4556 else 4557 err = 0; 4558 4559 return err; 4560 } 4561 4562 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover) 4563 { 4564 struct folio *folio; 4565 pgoff_t index; 4566 __u32 crc = 0; 4567 int err; 4568 4569 if ((recover && f2fs_readonly(sbi->sb)) || 4570 f2fs_hw_is_readonly(sbi)) { 4571 set_sbi_flag(sbi, SBI_NEED_SB_WRITE); 4572 return -EROFS; 4573 } 4574 4575 /* we should update superblock crc here */ 4576 if (!recover && f2fs_sb_has_sb_chksum(sbi)) { 4577 crc = f2fs_crc32(F2FS_RAW_SUPER(sbi), 4578 offsetof(struct f2fs_super_block, crc)); 4579 F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc); 4580 } 4581 4582 /* write back-up superblock first */ 4583 index = sbi->valid_super_block ? 
0 : 1; 4584 folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL); 4585 if (IS_ERR(folio)) 4586 return PTR_ERR(folio); 4587 err = __f2fs_commit_super(sbi, folio, index, true); 4588 folio_put(folio); 4589 4590 /* if we are in the recovery path, skip writing the valid superblock */ 4591 if (recover || err) 4592 return err; 4593 4594 /* write the current valid superblock */ 4595 index = sbi->valid_super_block; 4596 folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL); 4597 if (IS_ERR(folio)) 4598 return PTR_ERR(folio); 4599 err = __f2fs_commit_super(sbi, folio, index, true); 4600 folio_put(folio); 4601 return err; 4602 } 4603 4604 static void save_stop_reason(struct f2fs_sb_info *sbi, unsigned char reason) 4605 { 4606 unsigned long flags; 4607 4608 spin_lock_irqsave(&sbi->error_lock, flags); 4609 if (sbi->stop_reason[reason] < GENMASK(BITS_PER_BYTE - 1, 0)) 4610 sbi->stop_reason[reason]++; 4611 spin_unlock_irqrestore(&sbi->error_lock, flags); 4612 } 4613 4614 static void f2fs_record_stop_reason(struct f2fs_sb_info *sbi) 4615 { 4616 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 4617 unsigned long flags; 4618 int err; 4619 4620 f2fs_down_write(&sbi->sb_lock); 4621 4622 spin_lock_irqsave(&sbi->error_lock, flags); 4623 if (sbi->error_dirty) { 4624 memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors, 4625 MAX_F2FS_ERRORS); 4626 sbi->error_dirty = false; 4627 } 4628 memcpy(raw_super->s_stop_reason, sbi->stop_reason, MAX_STOP_REASON); 4629 spin_unlock_irqrestore(&sbi->error_lock, flags); 4630 4631 err = f2fs_commit_super(sbi, false); 4632 4633 f2fs_up_write(&sbi->sb_lock); 4634 if (err) 4635 f2fs_err_ratelimited(sbi, 4636 "f2fs_commit_super failed to record stop_reason, err: %d", 4637 err); 4638 } 4639 4640 void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag) 4641 { 4642 unsigned long flags; 4643 4644 spin_lock_irqsave(&sbi->error_lock, flags); 4645 if (!test_bit(flag, (unsigned long *)sbi->errors)) { 4646 set_bit(flag, (unsigned long *)sbi->errors); 4647 sbi->error_dirty = true; 4648 } 4649 spin_unlock_irqrestore(&sbi->error_lock, flags); 4650 } 4651 4652 void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error) 4653 { 4654 f2fs_save_errors(sbi, error); 4655 4656 if (!sbi->error_dirty) 4657 return; 4658 if (!test_bit(error, (unsigned long *)sbi->errors)) 4659 return; 4660 schedule_work(&sbi->s_error_work); 4661 } 4662 4663 static bool system_going_down(void) 4664 { 4665 return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF 4666 || system_state == SYSTEM_RESTART; 4667 } 4668 4669 static void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, 4670 unsigned char reason) 4671 { 4672 struct super_block *sb = sbi->sb; 4673 bool shutdown = reason == STOP_CP_REASON_SHUTDOWN; 4674 bool continue_fs = !shutdown && 4675 F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE; 4676 4677 set_ckpt_flags(sbi, CP_ERROR_FLAG); 4678 4679 if (!f2fs_hw_is_readonly(sbi)) { 4680 save_stop_reason(sbi, reason); 4681 4682 /* 4683 * always create an asynchronous task to record stop_reason 4684 * in order to avoid a potential deadlock from running 4685 * f2fs_record_stop_reason() synchronously. 4686 */ 4687 schedule_work(&sbi->s_error_work); 4688 } 4689 4690 /* 4691 * We force ERRORS_RO behavior when the system is rebooting. Otherwise 4692 * we could panic during 'reboot -f' because the underlying device has 4693 * already been disabled.
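 * system_going_down() above treats SYSTEM_HALT, SYSTEM_POWER_OFF and
 * SYSTEM_RESTART as the system rebooting for this purpose.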
4694 */ 4695 if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC && 4696 !shutdown && !system_going_down() && 4697 !is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) 4698 panic("F2FS-fs (device %s): panic forced after error\n", 4699 sb->s_id); 4700 4701 if (shutdown) 4702 set_sbi_flag(sbi, SBI_IS_SHUTDOWN); 4703 else 4704 dump_stack(); 4705 4706 /* 4707 * Continue filesystem operation if errors=continue. Do not set 4708 * RO on shutdown, since RO bypasses thaw_super, which can hang the 4709 * system. 4710 */ 4711 if (continue_fs || f2fs_readonly(sb) || shutdown) { 4712 f2fs_warn(sbi, "Stopped filesystem due to reason: %d", reason); 4713 return; 4714 } 4715 4716 f2fs_warn(sbi, "Remounting filesystem read-only"); 4717 4718 /* 4719 * We have already set the CP_ERROR_FLAG to stop all updates 4720 * to the filesystem, so there is no need to set SB_RDONLY here: 4721 * the flag should be set under the sb->s_umount semaphore via the 4722 * remount procedure; otherwise, it will confuse code like 4723 * freeze_super(), which will lead to deadlocks and other problems. 4724 */ 4725 } 4726 4727 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io, 4728 unsigned char reason) 4729 { 4730 f2fs_build_fault_attr(sbi, 0, 0, FAULT_ALL); 4731 if (!end_io) 4732 f2fs_flush_merged_writes(sbi); 4733 f2fs_handle_critical_error(sbi, reason); 4734 } 4735 4736 4737 static void f2fs_record_error_work(struct work_struct *work) 4738 { 4739 struct f2fs_sb_info *sbi = container_of(work, 4740 struct f2fs_sb_info, s_error_work); 4741 4742 f2fs_record_stop_reason(sbi); 4743 } 4744 4745 static inline unsigned int get_first_seq_zone_segno(struct f2fs_sb_info *sbi) 4746 { 4747 #ifdef CONFIG_BLK_DEV_ZONED 4748 unsigned int zoneno, total_zones; 4749 int devi; 4750 4751 if (!f2fs_sb_has_blkzoned(sbi)) 4752 return NULL_SEGNO; 4753 4754 for (devi = 0; devi < sbi->s_ndevs; devi++) { 4755 if (!bdev_is_zoned(FDEV(devi).bdev)) 4756 continue; 4757 4758 total_zones = GET_ZONE_FROM_SEG(sbi, FDEV(devi).total_segments); 4759 4760 for (zoneno = 0; zoneno < total_zones; zoneno++) { 4761 unsigned int segs, blks; 4762 4763 if (!f2fs_zone_is_seq(sbi, devi, zoneno)) 4764 continue; 4765 4766 segs = GET_SEG_FROM_SEC(sbi, 4767 zoneno * sbi->secs_per_zone); 4768 blks = SEGS_TO_BLKS(sbi, segs); 4769 return GET_SEGNO(sbi, FDEV(devi).start_blk + blks); 4770 } 4771 } 4772 #endif 4773 return NULL_SEGNO; 4774 } 4775 4776 static int f2fs_scan_devices(struct f2fs_sb_info *sbi) 4777 { 4778 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 4779 unsigned int max_devices = MAX_DEVICES; 4780 unsigned int logical_blksize; 4781 blk_mode_t mode = sb_open_mode(sbi->sb->s_flags); 4782 int i; 4783 4784 /* Initialize single device information */ 4785 if (!RDEV(0).path[0]) { 4786 if (!bdev_is_zoned(sbi->sb->s_bdev)) 4787 return 0; 4788 max_devices = 1; 4789 } 4790 4791 /* 4792 * Initialize multiple devices information, or single 4793 * zoned block device information.
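 * Note: when the only device is zoned, max_devices was clamped to 1
 * above, so only FDEV(0) is set up by the loop below.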
4794 */ 4795 sbi->devs = f2fs_kzalloc(sbi, 4796 array_size(max_devices, 4797 sizeof(struct f2fs_dev_info)), 4798 GFP_KERNEL); 4799 if (!sbi->devs) 4800 return -ENOMEM; 4801 4802 logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev); 4803 sbi->aligned_blksize = true; 4804 sbi->bggc_io_aware = AWARE_ALL_IO; 4805 #ifdef CONFIG_BLK_DEV_ZONED 4806 sbi->max_open_zones = UINT_MAX; 4807 sbi->blkzone_alloc_policy = BLKZONE_ALLOC_PRIOR_SEQ; 4808 sbi->bggc_io_aware = AWARE_READ_IO; 4809 #endif 4810 4811 for (i = 0; i < max_devices; i++) { 4812 if (max_devices == 1) { 4813 FDEV(i).total_segments = 4814 le32_to_cpu(raw_super->segment_count_main); 4815 FDEV(i).start_blk = 0; 4816 FDEV(i).end_blk = FDEV(i).total_segments * 4817 BLKS_PER_SEG(sbi); 4818 } 4819 4820 if (i == 0) 4821 FDEV(0).bdev_file = sbi->sb->s_bdev_file; 4822 else if (!RDEV(i).path[0]) 4823 break; 4824 4825 if (max_devices > 1) { 4826 /* Multi-device mount */ 4827 memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN); 4828 FDEV(i).total_segments = 4829 le32_to_cpu(RDEV(i).total_segments); 4830 if (i == 0) { 4831 FDEV(i).start_blk = 0; 4832 FDEV(i).end_blk = FDEV(i).start_blk + 4833 SEGS_TO_BLKS(sbi, 4834 FDEV(i).total_segments) - 1 + 4835 le32_to_cpu(raw_super->segment0_blkaddr); 4836 sbi->allocate_section_hint = FDEV(i).total_segments / 4837 SEGS_PER_SEC(sbi); 4838 } else { 4839 FDEV(i).start_blk = FDEV(i - 1).end_blk + 1; 4840 FDEV(i).end_blk = FDEV(i).start_blk + 4841 SEGS_TO_BLKS(sbi, 4842 FDEV(i).total_segments) - 1; 4843 FDEV(i).bdev_file = bdev_file_open_by_path( 4844 FDEV(i).path, mode, sbi->sb, NULL); 4845 } 4846 } 4847 if (IS_ERR(FDEV(i).bdev_file)) 4848 return PTR_ERR(FDEV(i).bdev_file); 4849 4850 FDEV(i).bdev = file_bdev(FDEV(i).bdev_file); 4851 /* to release errored devices */ 4852 sbi->s_ndevs = i + 1; 4853 4854 if (logical_blksize != bdev_logical_block_size(FDEV(i).bdev)) 4855 sbi->aligned_blksize = false; 4856 4857 #ifdef CONFIG_BLK_DEV_ZONED 4858 if (bdev_is_zoned(FDEV(i).bdev)) { 4859 if (!f2fs_sb_has_blkzoned(sbi)) { 4860 f2fs_err(sbi, "Zoned block device feature not enabled"); 4861 return -EINVAL; 4862 } 4863 if (init_blkz_info(sbi, i)) { 4864 f2fs_err(sbi, "Failed to initialize F2FS blkzone information"); 4865 return -EINVAL; 4866 } 4867 if (max_devices == 1) 4868 break; 4869 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: Host-managed)", 4870 i, FDEV(i).path, 4871 FDEV(i).total_segments, 4872 FDEV(i).start_blk, FDEV(i).end_blk); 4873 continue; 4874 } 4875 #endif 4876 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x", 4877 i, FDEV(i).path, 4878 FDEV(i).total_segments, 4879 FDEV(i).start_blk, FDEV(i).end_blk); 4880 } 4881 return 0; 4882 } 4883 4884 static int f2fs_setup_casefold(struct f2fs_sb_info *sbi) 4885 { 4886 #if IS_ENABLED(CONFIG_UNICODE) 4887 if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) { 4888 const struct f2fs_sb_encodings *encoding_info; 4889 struct unicode_map *encoding; 4890 __u16 encoding_flags; 4891 4892 encoding_info = f2fs_sb_read_encoding(sbi->raw_super); 4893 if (!encoding_info) { 4894 f2fs_err(sbi, 4895 "Encoding requested by superblock is unknown"); 4896 return -EINVAL; 4897 } 4898 4899 encoding_flags = le16_to_cpu(sbi->raw_super->s_encoding_flags); 4900 encoding = utf8_load(encoding_info->version); 4901 if (IS_ERR(encoding)) { 4902 f2fs_err(sbi, 4903 "can't mount with superblock charset: %s-%u.%u.%u " 4904 "not supported by the kernel. 
flags: 0x%x.", 4905 encoding_info->name, 4906 unicode_major(encoding_info->version), 4907 unicode_minor(encoding_info->version), 4908 unicode_rev(encoding_info->version), 4909 encoding_flags); 4910 return PTR_ERR(encoding); 4911 } 4912 f2fs_info(sbi, "Using encoding defined by superblock: " 4913 "%s-%u.%u.%u with flags 0x%hx", encoding_info->name, 4914 unicode_major(encoding_info->version), 4915 unicode_minor(encoding_info->version), 4916 unicode_rev(encoding_info->version), 4917 encoding_flags); 4918 4919 sbi->sb->s_encoding = encoding; 4920 sbi->sb->s_encoding_flags = encoding_flags; 4921 } 4922 #else 4923 if (f2fs_sb_has_casefold(sbi)) { 4924 f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE"); 4925 return -EINVAL; 4926 } 4927 #endif 4928 return 0; 4929 } 4930 4931 static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi) 4932 { 4933 /* adjust parameters according to the volume size */ 4934 if (MAIN_SEGS(sbi) <= SMALL_VOLUME_SEGMENTS) { 4935 if (f2fs_block_unit_discard(sbi)) 4936 SM_I(sbi)->dcc_info->discard_granularity = 4937 MIN_DISCARD_GRANULARITY; 4938 if (!f2fs_lfs_mode(sbi)) 4939 SM_I(sbi)->ipu_policy = BIT(F2FS_IPU_FORCE) | 4940 BIT(F2FS_IPU_HONOR_OPU_WRITE); 4941 } 4942 4943 sbi->readdir_ra = true; 4944 } 4945 4946 static int f2fs_fill_super(struct super_block *sb, struct fs_context *fc) 4947 { 4948 struct f2fs_fs_context *ctx = fc->fs_private; 4949 struct f2fs_sb_info *sbi; 4950 struct f2fs_super_block *raw_super; 4951 struct inode *root; 4952 int err; 4953 bool skip_recovery = false, need_fsck = false; 4954 int recovery, i, valid_super_block; 4955 struct curseg_info *seg_i; 4956 int retry_cnt = 1; 4957 #ifdef CONFIG_QUOTA 4958 bool quota_enabled = false; 4959 #endif 4960 4961 try_onemore: 4962 err = -EINVAL; 4963 raw_super = NULL; 4964 valid_super_block = -1; 4965 recovery = 0; 4966 4967 /* allocate memory for f2fs-specific super block info */ 4968 sbi = kzalloc_obj(struct f2fs_sb_info); 4969 if (!sbi) 4970 return -ENOMEM; 4971 4972 sbi->sb = sb; 4973 4974 /* initialize locks within allocated memory */ 4975 init_f2fs_rwsem_trace(&sbi->gc_lock, sbi, LOCK_NAME_GC_LOCK); 4976 mutex_init(&sbi->writepages); 4977 init_f2fs_rwsem_trace(&sbi->cp_global_sem, sbi, LOCK_NAME_CP_GLOBAL); 4978 #ifdef CONFIG_DEBUG_LOCK_ALLOC 4979 lockdep_register_key(&sbi->cp_global_sem_key); 4980 lockdep_set_class(&sbi->cp_global_sem.internal_rwsem, 4981 &sbi->cp_global_sem_key); 4982 #endif 4983 init_f2fs_rwsem_trace(&sbi->node_write, sbi, LOCK_NAME_NODE_WRITE); 4984 init_f2fs_rwsem_trace(&sbi->node_change, sbi, LOCK_NAME_NODE_CHANGE); 4985 spin_lock_init(&sbi->stat_lock); 4986 init_f2fs_rwsem_trace(&sbi->cp_rwsem, sbi, LOCK_NAME_CP_RWSEM); 4987 init_f2fs_rwsem(&sbi->quota_sem); 4988 init_waitqueue_head(&sbi->cp_wait); 4989 spin_lock_init(&sbi->error_lock); 4990 4991 for (i = 0; i < NR_INODE_TYPE; i++) { 4992 INIT_LIST_HEAD(&sbi->inode_list[i]); 4993 spin_lock_init(&sbi->inode_lock[i]); 4994 } 4995 mutex_init(&sbi->flush_lock); 4996 4997 /* set a block size */ 4998 if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) { 4999 f2fs_err(sbi, "unable to set blocksize"); 5000 goto free_sbi; 5001 } 5002 5003 err = read_raw_super_block(sbi, &raw_super, &valid_super_block, 5004 &recovery); 5005 if (err) 5006 goto free_sbi; 5007 5008 sb->s_fs_info = sbi; 5009 sbi->raw_super = raw_super; 5010 5011 INIT_WORK(&sbi->s_error_work, f2fs_record_error_work); 5012 memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS); 5013 memcpy(sbi->stop_reason, raw_super->s_stop_reason, 
MAX_STOP_REASON); 5014 5015 /* precompute checksum seed for metadata */ 5016 if (f2fs_sb_has_inode_chksum(sbi)) 5017 sbi->s_chksum_seed = f2fs_chksum(~0, raw_super->uuid, 5018 sizeof(raw_super->uuid)); 5019 5020 default_options(sbi, false); 5021 5022 err = f2fs_check_opt_consistency(fc, sb); 5023 if (err) 5024 goto free_sb_buf; 5025 5026 f2fs_apply_options(fc, sb); 5027 5028 err = f2fs_sanity_check_options(sbi, false); 5029 if (err) 5030 goto free_options; 5031 5032 sb->s_maxbytes = max_file_blocks(NULL) << 5033 le32_to_cpu(raw_super->log_blocksize); 5034 sb->s_max_links = F2FS_LINK_MAX; 5035 5036 err = f2fs_setup_casefold(sbi); 5037 if (err) 5038 goto free_options; 5039 5040 #ifdef CONFIG_QUOTA 5041 sb->dq_op = &f2fs_quota_operations; 5042 sb->s_qcop = &f2fs_quotactl_ops; 5043 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ; 5044 5045 if (f2fs_sb_has_quota_ino(sbi)) { 5046 for (i = 0; i < MAXQUOTAS; i++) { 5047 if (f2fs_qf_ino(sbi->sb, i)) 5048 sbi->nquota_files++; 5049 } 5050 } 5051 #endif 5052 5053 sb->s_op = &f2fs_sops; 5054 #ifdef CONFIG_FS_ENCRYPTION 5055 sb->s_cop = &f2fs_cryptops; 5056 #endif 5057 #ifdef CONFIG_FS_VERITY 5058 sb->s_vop = &f2fs_verityops; 5059 #endif 5060 sb->s_xattr = f2fs_xattr_handlers; 5061 sb->s_export_op = &f2fs_export_ops; 5062 sb->s_magic = F2FS_SUPER_MAGIC; 5063 sb->s_time_gran = 1; 5064 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | 5065 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0); 5066 if (test_opt(sbi, INLINECRYPT)) 5067 sb->s_flags |= SB_INLINECRYPT; 5068 5069 if (test_opt(sbi, LAZYTIME)) 5070 sb->s_flags |= SB_LAZYTIME; 5071 else 5072 sb->s_flags &= ~SB_LAZYTIME; 5073 5074 super_set_uuid(sb, (void *) raw_super->uuid, sizeof(raw_super->uuid)); 5075 super_set_sysfs_name_bdev(sb); 5076 sb->s_iflags |= SB_I_CGROUPWB; 5077 5078 /* init f2fs-specific super block info */ 5079 sbi->valid_super_block = valid_super_block; 5080 5081 /* disallow all the data/node/meta page writes */ 5082 set_sbi_flag(sbi, SBI_POR_DOING); 5083 5084 err = f2fs_init_write_merge_io(sbi); 5085 if (err) 5086 goto free_bio_info; 5087 5088 init_sb_info(sbi); 5089 5090 err = f2fs_init_iostat(sbi); 5091 if (err) 5092 goto free_bio_info; 5093 5094 err = init_percpu_info(sbi); 5095 if (err) 5096 goto free_iostat; 5097 5098 err = f2fs_init_page_array_cache(sbi); 5099 if (err) 5100 goto free_percpu; 5101 5102 /* get an inode for meta space */ 5103 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); 5104 if (IS_ERR(sbi->meta_inode)) { 5105 f2fs_err(sbi, "Failed to read F2FS meta data inode"); 5106 err = PTR_ERR(sbi->meta_inode); 5107 goto free_page_array_cache; 5108 } 5109 5110 err = f2fs_get_valid_checkpoint(sbi); 5111 if (err) { 5112 f2fs_err(sbi, "Failed to get valid F2FS checkpoint"); 5113 goto free_meta_inode; 5114 } 5115 5116 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG)) 5117 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); 5118 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) { 5119 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK); 5120 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL; 5121 } 5122 5123 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG)) 5124 set_sbi_flag(sbi, SBI_NEED_FSCK); 5125 5126 /* Initialize device list */ 5127 err = f2fs_scan_devices(sbi); 5128 if (err) { 5129 f2fs_err(sbi, "Failed to find devices"); 5130 goto free_devices; 5131 } 5132 5133 err = f2fs_init_post_read_wq(sbi); 5134 if (err) { 5135 f2fs_err(sbi, "Failed to initialize post read workqueue"); 5136 goto free_devices; 5137 } 5138 5139 
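/* Seed the in-memory usage counters from the on-disk checkpoint. */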
sbi->total_valid_node_count = 5140 le32_to_cpu(sbi->ckpt->valid_node_count); 5141 percpu_counter_set(&sbi->total_valid_inode_count, 5142 le32_to_cpu(sbi->ckpt->valid_inode_count)); 5143 sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count); 5144 sbi->total_valid_block_count = 5145 le64_to_cpu(sbi->ckpt->valid_block_count); 5146 sbi->last_valid_block_count = sbi->total_valid_block_count; 5147 sbi->reserved_blocks = 0; 5148 sbi->current_reserved_blocks = 0; 5149 limit_reserve_root(sbi); 5150 adjust_unusable_cap_perc(sbi); 5151 5152 f2fs_init_extent_cache_info(sbi); 5153 5154 f2fs_init_ino_entry_info(sbi); 5155 5156 f2fs_init_fsync_node_info(sbi); 5157 5158 /* setup checkpoint request control and start checkpoint issue thread */ 5159 f2fs_init_ckpt_req_control(sbi); 5160 if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) && 5161 test_opt(sbi, MERGE_CHECKPOINT)) { 5162 err = f2fs_start_ckpt_thread(sbi); 5163 if (err) { 5164 f2fs_err(sbi, 5165 "Failed to start F2FS issue_checkpoint_thread (%d)", 5166 err); 5167 goto stop_ckpt_thread; 5168 } 5169 } 5170 5171 /* setup f2fs internal modules */ 5172 err = f2fs_build_segment_manager(sbi); 5173 if (err) { 5174 f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)", 5175 err); 5176 goto free_sm; 5177 } 5178 err = f2fs_build_node_manager(sbi); 5179 if (err) { 5180 f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)", 5181 err); 5182 goto free_nm; 5183 } 5184 5185 /* For write statistics */ 5186 sbi->sectors_written_start = f2fs_get_sectors_written(sbi); 5187 5188 /* get segno of first zoned block device */ 5189 sbi->first_seq_zone_segno = get_first_seq_zone_segno(sbi); 5190 5191 sbi->reserved_pin_section = f2fs_sb_has_blkzoned(sbi) ? 5192 ZONED_PIN_SEC_REQUIRED_COUNT : 5193 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)); 5194 5195 /* Read accumulated write IO statistics if exists */ 5196 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE); 5197 if (__exist_node_summaries(sbi)) 5198 sbi->kbytes_written = 5199 le64_to_cpu(seg_i->journal->info.kbytes_written); 5200 5201 f2fs_build_gc_manager(sbi); 5202 5203 err = f2fs_build_stats(sbi); 5204 if (err) 5205 goto free_nm; 5206 5207 /* get an inode for node space */ 5208 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi)); 5209 if (IS_ERR(sbi->node_inode)) { 5210 f2fs_err(sbi, "Failed to read node inode"); 5211 err = PTR_ERR(sbi->node_inode); 5212 goto free_stats; 5213 } 5214 5215 /* read root inode and dentry */ 5216 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi)); 5217 if (IS_ERR(root)) { 5218 f2fs_err(sbi, "Failed to read root inode"); 5219 err = PTR_ERR(root); 5220 goto free_node_inode; 5221 } 5222 if (!S_ISDIR(root->i_mode) || !root->i_blocks || 5223 !root->i_size || !root->i_nlink) { 5224 iput(root); 5225 err = -EINVAL; 5226 goto free_node_inode; 5227 } 5228 5229 generic_set_sb_d_ops(sb); 5230 sb->s_root = d_make_root(root); /* allocate root dentry */ 5231 if (!sb->s_root) { 5232 err = -ENOMEM; 5233 goto free_node_inode; 5234 } 5235 5236 err = f2fs_init_compress_inode(sbi); 5237 if (err) 5238 goto free_root_inode; 5239 5240 err = f2fs_register_sysfs(sbi); 5241 if (err) 5242 goto free_compress_inode; 5243 5244 sbi->umount_lock_holder = current; 5245 #ifdef CONFIG_QUOTA 5246 /* Enable quota usage during mount */ 5247 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) { 5248 err = f2fs_enable_quotas(sb); 5249 if (err) 5250 f2fs_err(sbi, "Cannot turn on quotas: error %d", err); 5251 } 5252 5253 quota_enabled = f2fs_recover_quota_begin(sbi); 5254 #endif 5255 /* if there are any orphan 
inodes, free them */ 5256 err = f2fs_recover_orphan_inodes(sbi); 5257 if (err) 5258 goto free_meta; 5259 5260 if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG))) { 5261 skip_recovery = true; 5262 goto reset_checkpoint; 5263 } 5264 5265 /* recover fsynced data */ 5266 if (!test_opt(sbi, DISABLE_ROLL_FORWARD) && 5267 !test_opt(sbi, NORECOVERY)) { 5268 /* 5269 * the mount should fail when the device is read-only and the 5270 * previous checkpoint was not written by a clean system shutdown. 5271 */ 5272 if (f2fs_hw_is_readonly(sbi)) { 5273 if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) { 5274 err = f2fs_recover_fsync_data(sbi, true); 5275 if (err > 0) { 5276 err = -EROFS; 5277 f2fs_err(sbi, "Need to recover fsync data, but " 5278 "write access unavailable, please try " 5279 "mount w/ disable_roll_forward or norecovery"); 5280 } 5281 if (err < 0) 5282 goto free_meta; 5283 } 5284 f2fs_info(sbi, "write access unavailable, skipping recovery"); 5285 goto reset_checkpoint; 5286 } 5287 5288 if (need_fsck) 5289 set_sbi_flag(sbi, SBI_NEED_FSCK); 5290 5291 if (skip_recovery) 5292 goto reset_checkpoint; 5293 5294 err = f2fs_recover_fsync_data(sbi, false); 5295 if (err < 0) { 5296 if (err != -ENOMEM) 5297 skip_recovery = true; 5298 need_fsck = true; 5299 f2fs_err(sbi, "Cannot recover all fsync data errno=%d", 5300 err); 5301 goto free_meta; 5302 } 5303 } else { 5304 err = f2fs_recover_fsync_data(sbi, true); 5305 if (err > 0) { 5306 if (!f2fs_readonly(sb)) { 5307 f2fs_err(sbi, "Need to recover fsync data"); 5308 err = -EINVAL; 5309 goto free_meta; 5310 } else { 5311 f2fs_info(sbi, "drop all fsynced data"); 5312 err = 0; 5313 } 5314 } 5315 } 5316 5317 reset_checkpoint: 5318 #ifdef CONFIG_QUOTA 5319 f2fs_recover_quota_end(sbi, quota_enabled); 5320 #endif 5321 /* 5322 * If f2fs is not read-only and fsync data recovery succeeds, the 5323 * write pointer consistency of cursegs and other zones is already 5324 * checked and fixed during recovery. However, if recovery fails, 5325 * write pointers are left untouched, and a retry-mount should check 5326 * them here. 5327 */ 5328 if (skip_recovery) 5329 err = f2fs_check_and_fix_write_pointer(sbi); 5330 if (err) 5331 goto free_meta; 5332 5333 /* f2fs_recover_fsync_data() cleared this already */ 5334 clear_sbi_flag(sbi, SBI_POR_DOING); 5335 5336 err = f2fs_init_inmem_curseg(sbi); 5337 if (err) 5338 goto sync_free_meta; 5339 5340 if (test_opt(sbi, DISABLE_CHECKPOINT)) 5341 err = f2fs_disable_checkpoint(sbi); 5342 else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) 5343 err = f2fs_enable_checkpoint(sbi); 5344 if (err) 5345 goto sync_free_meta; 5346 5347 /* 5348 * If the filesystem is not mounted read-only, then 5349 * start the gc_thread. 5350 */ 5351 if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF || 5352 test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) { 5353 /* After POR, we can run the background GC thread. */ 5354 err = f2fs_start_gc_thread(sbi); 5355 if (err) 5356 goto sync_free_meta; 5357 } 5358 5359 /* recover broken superblock */ 5360 if (recovery) { 5361 err = f2fs_commit_super(sbi, true); 5362 f2fs_info(sbi, "Try to recover %dth superblock, ret: %d", 5363 sbi->valid_super_block ?
1 : 2, err); 5364 } 5365 5366 f2fs_join_shrinker(sbi); 5367 5368 f2fs_tuning_parameters(sbi); 5369 5370 f2fs_notice(sbi, "Mounted with checkpoint version = %llx", 5371 cur_cp_version(F2FS_CKPT(sbi))); 5372 f2fs_update_time(sbi, CP_TIME); 5373 f2fs_update_time(sbi, REQ_TIME); 5374 clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK); 5375 5376 sbi->umount_lock_holder = NULL; 5377 return 0; 5378 5379 sync_free_meta: 5380 /* safe to flush all the data */ 5381 sync_filesystem(sbi->sb); 5382 retry_cnt = 0; 5383 5384 free_meta: 5385 #ifdef CONFIG_QUOTA 5386 f2fs_truncate_quota_inode_pages(sb); 5387 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) 5388 f2fs_quota_off_umount(sbi->sb); 5389 #endif 5390 /* 5391 * Some dirty meta pages can be left when f2fs_recover_orphan_inodes() 5392 * fails with EIO. Then, iput(node_inode) can trigger balance_fs_bg() 5393 * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(), which 5394 * falls into an infinite loop in f2fs_sync_meta_pages(). 5395 */ 5396 truncate_inode_pages_final(META_MAPPING(sbi)); 5397 /* evict some inodes being cached by GC */ 5398 evict_inodes(sb); 5399 f2fs_unregister_sysfs(sbi); 5400 free_compress_inode: 5401 f2fs_destroy_compress_inode(sbi); 5402 free_root_inode: 5403 dput(sb->s_root); 5404 sb->s_root = NULL; 5405 free_node_inode: 5406 f2fs_release_ino_entry(sbi, true); 5407 truncate_inode_pages_final(NODE_MAPPING(sbi)); 5408 iput(sbi->node_inode); 5409 sbi->node_inode = NULL; 5410 free_stats: 5411 f2fs_destroy_stats(sbi); 5412 free_nm: 5413 /* stop discard thread before destroying node manager */ 5414 f2fs_stop_discard_thread(sbi); 5415 f2fs_destroy_node_manager(sbi); 5416 free_sm: 5417 f2fs_destroy_segment_manager(sbi); 5418 stop_ckpt_thread: 5419 f2fs_stop_ckpt_thread(sbi); 5420 /* flush s_error_work before sbi destroy */ 5421 flush_work(&sbi->s_error_work); 5422 f2fs_destroy_post_read_wq(sbi); 5423 free_devices: 5424 destroy_device_list(sbi); 5425 kvfree(sbi->ckpt); 5426 free_meta_inode: 5427 make_bad_inode(sbi->meta_inode); 5428 iput(sbi->meta_inode); 5429 sbi->meta_inode = NULL; 5430 free_page_array_cache: 5431 f2fs_destroy_page_array_cache(sbi); 5432 free_percpu: 5433 destroy_percpu_info(sbi); 5434 free_iostat: 5435 f2fs_destroy_iostat(sbi); 5436 free_bio_info: 5437 for (i = 0; i < NR_PAGE_TYPE; i++) 5438 kfree(sbi->write_io[i]); 5439 5440 #if IS_ENABLED(CONFIG_UNICODE) 5441 utf8_unload(sb->s_encoding); 5442 sb->s_encoding = NULL; 5443 #endif 5444 free_options: 5445 #ifdef CONFIG_QUOTA 5446 for (i = 0; i < MAXQUOTAS; i++) 5447 kfree(F2FS_OPTION(sbi).s_qf_names[i]); 5448 #endif 5449 /* no need to free dummy_enc_policy; we just keep it in ctx on failure */ 5450 swap(F2FS_CTX_INFO(ctx).dummy_enc_policy, F2FS_OPTION(sbi).dummy_enc_policy); 5451 free_sb_buf: 5452 kfree(raw_super); 5453 free_sbi: 5454 #ifdef CONFIG_DEBUG_LOCK_ALLOC 5455 lockdep_unregister_key(&sbi->cp_global_sem_key); 5456 #endif 5457 kfree(sbi); 5458 sb->s_fs_info = NULL; 5459 5460 /* give only one more chance */ 5461 if (retry_cnt > 0 && skip_recovery) { 5462 retry_cnt--; 5463 shrink_dcache_sb(sb); 5464 goto try_onemore; 5465 } 5466 return err; 5467 } 5468 5469 static int f2fs_get_tree(struct fs_context *fc) 5470 { 5471 return get_tree_bdev(fc, f2fs_fill_super); 5472 } 5473 5474 static int f2fs_reconfigure(struct fs_context *fc) 5475 { 5476 struct super_block *sb = fc->root->d_sb; 5477 5478 return __f2fs_remount(fc, sb); 5479 } 5480 5481 static void f2fs_fc_free(struct fs_context *fc) 5482 { 5483 struct f2fs_fs_context *ctx = fc->fs_private; 5484 5485
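/* Defensive: fs_private may still be NULL if context setup failed early. */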
if (!ctx) 5486 return; 5487 5488 #ifdef CONFIG_QUOTA 5489 f2fs_unnote_qf_name_all(fc); 5490 #endif 5491 fscrypt_free_dummy_policy(&F2FS_CTX_INFO(ctx).dummy_enc_policy); 5492 kfree(ctx); 5493 } 5494 5495 static const struct fs_context_operations f2fs_context_ops = { 5496 .parse_param = f2fs_parse_param, 5497 .get_tree = f2fs_get_tree, 5498 .reconfigure = f2fs_reconfigure, 5499 .free = f2fs_fc_free, 5500 }; 5501 5502 static void kill_f2fs_super(struct super_block *sb) 5503 { 5504 struct f2fs_sb_info *sbi = F2FS_SB(sb); 5505 5506 if (sb->s_root) { 5507 sbi->umount_lock_holder = current; 5508 5509 set_sbi_flag(sbi, SBI_IS_CLOSE); 5510 f2fs_stop_gc_thread(sbi); 5511 f2fs_stop_discard_thread(sbi); 5512 5513 #ifdef CONFIG_F2FS_FS_COMPRESSION 5514 /* 5515 * otherwise, a later evict_inode() could bypass checking and 5516 * invalidating the compress inode cache. 5517 */ 5518 if (test_opt(sbi, COMPRESS_CACHE)) 5519 truncate_inode_pages_final(COMPRESS_MAPPING(sbi)); 5520 #endif 5521 5522 if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) || 5523 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) { 5524 struct cp_control cpc = { 5525 .reason = CP_UMOUNT, 5526 }; 5527 stat_inc_cp_call_count(sbi, TOTAL_CALL); 5528 f2fs_write_checkpoint(sbi, &cpc); 5529 } 5530 5531 if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb)) 5532 sb->s_flags &= ~SB_RDONLY; 5533 } 5534 kill_block_super(sb); 5535 /* Release block devices last, after fscrypt_destroy_keyring(). */ 5536 if (sbi) { 5537 destroy_device_list(sbi); 5538 #ifdef CONFIG_DEBUG_LOCK_ALLOC 5539 lockdep_unregister_key(&sbi->cp_global_sem_key); 5540 #endif 5541 kfree(sbi); 5542 sb->s_fs_info = NULL; 5543 } 5544 } 5545 5546 static int f2fs_init_fs_context(struct fs_context *fc) 5547 { 5548 struct f2fs_fs_context *ctx; 5549 5550 ctx = kzalloc_obj(struct f2fs_fs_context); 5551 if (!ctx) 5552 return -ENOMEM; 5553 5554 fc->fs_private = ctx; 5555 fc->ops = &f2fs_context_ops; 5556 5557 return 0; 5558 } 5559 5560 static struct file_system_type f2fs_fs_type = { 5561 .owner = THIS_MODULE, 5562 .name = "f2fs", 5563 .init_fs_context = f2fs_init_fs_context, 5564 .kill_sb = kill_f2fs_super, 5565 .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP, 5566 }; 5567 MODULE_ALIAS_FS("f2fs"); 5568 5569 static int __init init_inodecache(void) 5570 { 5571 f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache", 5572 sizeof(struct f2fs_inode_info), 0, 5573 SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL); 5574 return f2fs_inode_cachep ? 0 : -ENOMEM; 5575 } 5576 5577 static void destroy_inodecache(void) 5578 { 5579 /* 5580 * Make sure all delayed RCU-freed inodes are flushed before we 5581 * destroy the cache.
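 * rcu_barrier() waits for all pending call_rcu() callbacks, including
 * the deferred inode frees, to complete.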
static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	return f2fs_inode_cachep ? 0 : -ENOMEM;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}

static int __init init_f2fs_fs(void)
{
	int err;

	err = init_inodecache();
	if (err)
		goto fail;
	err = f2fs_create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = f2fs_create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = f2fs_create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = f2fs_create_recovery_cache();
	if (err)
		goto free_checkpoint_caches;
	err = f2fs_create_extent_cache();
	if (err)
		goto free_recovery_cache;
	err = f2fs_create_garbage_collection_cache();
	if (err)
		goto free_extent_cache;
	err = f2fs_init_sysfs();
	if (err)
		goto free_garbage_collection_cache;
	err = f2fs_init_shrinker();
	if (err)
		goto free_sysfs;
	f2fs_create_root_stats();
	err = f2fs_init_post_read_processing();
	if (err)
		goto free_root_stats;
	err = f2fs_init_iostat_processing();
	if (err)
		goto free_post_read;
	err = f2fs_init_bio_entry_cache();
	if (err)
		goto free_iostat;
	err = f2fs_init_bioset();
	if (err)
		goto free_bio_entry_cache;
	err = f2fs_init_compress_mempool();
	if (err)
		goto free_bioset;
	err = f2fs_init_compress_cache();
	if (err)
		goto free_compress_mempool;
	err = f2fs_create_casefold_cache();
	if (err)
		goto free_compress_cache;
	err = f2fs_init_xattr_cache();
	if (err)
		goto free_casefold_cache;
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_xattr_cache;
	return 0;
free_xattr_cache:
	f2fs_destroy_xattr_cache();
free_casefold_cache:
	f2fs_destroy_casefold_cache();
free_compress_cache:
	f2fs_destroy_compress_cache();
free_compress_mempool:
	f2fs_destroy_compress_mempool();
free_bioset:
	f2fs_destroy_bioset();
free_bio_entry_cache:
	f2fs_destroy_bio_entry_cache();
free_iostat:
	f2fs_destroy_iostat_processing();
free_post_read:
	f2fs_destroy_post_read_processing();
free_root_stats:
	f2fs_destroy_root_stats();
	f2fs_exit_shrinker();
free_sysfs:
	f2fs_exit_sysfs();
free_garbage_collection_cache:
	f2fs_destroy_garbage_collection_cache();
free_extent_cache:
	f2fs_destroy_extent_cache();
free_recovery_cache:
	f2fs_destroy_recovery_cache();
free_checkpoint_caches:
	f2fs_destroy_checkpoint_caches();
free_segment_manager_caches:
	f2fs_destroy_segment_manager_caches();
free_node_manager_caches:
	f2fs_destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}

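/*
 * Module unload unwinds in the exact reverse of init_f2fs_fs():
 * unregister the filesystem first so no new mounts can race with the
 * teardown below.
 */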
static void __exit exit_f2fs_fs(void)
{
	unregister_filesystem(&f2fs_fs_type);
	f2fs_destroy_xattr_cache();
	f2fs_destroy_casefold_cache();
	f2fs_destroy_compress_cache();
	f2fs_destroy_compress_mempool();
	f2fs_destroy_bioset();
	f2fs_destroy_bio_entry_cache();
	f2fs_destroy_iostat_processing();
	f2fs_destroy_post_read_processing();
	f2fs_destroy_root_stats();
	f2fs_exit_shrinker();
	f2fs_exit_sysfs();
	f2fs_destroy_garbage_collection_cache();
	f2fs_destroy_extent_cache();
	f2fs_destroy_recovery_cache();
	f2fs_destroy_checkpoint_caches();
	f2fs_destroy_segment_manager_caches();
	f2fs_destroy_node_manager_caches();
	destroy_inodecache();
}

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");