xref: /linux/fs/erofs/super.c (revision ea8a163e02d6925773129e2dd86e419e491b791d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2017-2018 HUAWEI, Inc.
4  *             https://www.huawei.com/
5  */
6 #include <linux/module.h>
7 #include <linux/buffer_head.h>
8 #include <linux/statfs.h>
9 #include <linux/parser.h>
10 #include <linux/seq_file.h>
11 #include <linux/crc32c.h>
12 #include <linux/fs_context.h>
13 #include <linux/fs_parser.h>
14 #include <linux/dax.h>
15 #include "xattr.h"
16 
17 #define CREATE_TRACE_POINTS
18 #include <trace/events/erofs.h>
19 
20 static struct kmem_cache *erofs_inode_cachep __read_mostly;
21 
22 void _erofs_err(struct super_block *sb, const char *function,
23 		const char *fmt, ...)
24 {
25 	struct va_format vaf;
26 	va_list args;
27 
28 	va_start(args, fmt);
29 
30 	vaf.fmt = fmt;
31 	vaf.va = &args;
32 
33 	pr_err("(device %s): %s: %pV", sb->s_id, function, &vaf);
34 	va_end(args);
35 }
36 
37 void _erofs_info(struct super_block *sb, const char *function,
38 		 const char *fmt, ...)
39 {
40 	struct va_format vaf;
41 	va_list args;
42 
43 	va_start(args, fmt);
44 
45 	vaf.fmt = fmt;
46 	vaf.va = &args;
47 
48 	pr_info("(device %s): %pV", sb->s_id, &vaf);
49 	va_end(args);
50 }
51 
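/*
 * Verify the on-disk superblock checksum: crc32c (seeded with ~0) over the
 * range from EROFS_SUPER_OFFSET to the end of the first block, computed with
 * the checksum field itself zeroed out.
 */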
52 static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
53 {
54 	struct erofs_super_block *dsb;
55 	u32 expected_crc, crc;
56 
57 	dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET,
58 		      EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL);
59 	if (!dsb)
60 		return -ENOMEM;
61 
62 	expected_crc = le32_to_cpu(dsb->checksum);
63 	dsb->checksum = 0;
64 	/* the first EROFS_SUPER_OFFSET bytes are skipped to allow for x86 boot sectors and other oddities */
65 	crc = crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET);
66 	kfree(dsb);
67 
68 	if (crc != expected_crc) {
69 		erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
70 			  crc, expected_crc);
71 		return -EBADMSG;
72 	}
73 	return 0;
74 }
75 
76 static void erofs_inode_init_once(void *ptr)
77 {
78 	struct erofs_inode *vi = ptr;
79 
80 	inode_init_once(&vi->vfs_inode);
81 }
82 
83 static struct inode *erofs_alloc_inode(struct super_block *sb)
84 {
85 	struct erofs_inode *vi =
86 		kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL);
87 
88 	if (!vi)
89 		return NULL;
90 
91 	/* zero out everything except vfs_inode */
92 	memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
93 	return &vi->vfs_inode;
94 }
95 
96 static void erofs_free_inode(struct inode *inode)
97 {
98 	struct erofs_inode *vi = EROFS_I(inode);
99 
100 	/* be careful of RCU symlink path */
101 	if (inode->i_op == &erofs_fast_symlink_iops)
102 		kfree(inode->i_link);
103 	kfree(vi->xattr_shared_xattrs);
104 
105 	kmem_cache_free(erofs_inode_cachep, vi);
106 }
107 
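/*
 * Record the image's incompatible feature bits and refuse to mount if any
 * bit outside EROFS_ALL_FEATURE_INCOMPAT is set, i.e. the image requires
 * support this kernel doesn't have.
 */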
108 static bool check_layout_compatibility(struct super_block *sb,
109 				       struct erofs_super_block *dsb)
110 {
111 	const unsigned int feature = le32_to_cpu(dsb->feature_incompat);
112 
113 	EROFS_SB(sb)->feature_incompat = feature;
114 
115 	/* check if current kernel meets all mandatory requirements */
116 	if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
117 		erofs_err(sb,
118 			  "unidentified incompatible feature %x, please upgrade kernel version",
119 			   feature & ~EROFS_ALL_FEATURE_INCOMPAT);
120 		return false;
121 	}
122 	return true;
123 }
124 
125 #ifdef CONFIG_EROFS_FS_ZIP
126 /* read variable-sized metadata; the offset will be aligned to a 4-byte boundary */
127 static void *erofs_read_metadata(struct super_block *sb, struct page **pagep,
128 				 erofs_off_t *offset, int *lengthp)
129 {
130 	struct page *page = *pagep;
131 	u8 *buffer, *ptr;
132 	int len, i, cnt;
133 	erofs_blk_t blk;
134 
135 	*offset = round_up(*offset, 4);
136 	blk = erofs_blknr(*offset);
137 
138 	if (!page || page->index != blk) {
139 		if (page) {
140 			unlock_page(page);
141 			put_page(page);
142 		}
143 		page = erofs_get_meta_page(sb, blk);
144 		if (IS_ERR(page))
145 			goto err_nullpage;
146 	}
147 
148 	ptr = kmap(page);
149 	len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(*offset)]);
150 	if (!len)
151 		len = U16_MAX + 1;
152 	buffer = kmalloc(len, GFP_KERNEL);
153 	if (!buffer) {
154 		buffer = ERR_PTR(-ENOMEM);
155 		goto out;
156 	}
157 	*offset += sizeof(__le16);
158 	*lengthp = len;
159 
160 	for (i = 0; i < len; i += cnt) {
161 		cnt = min(EROFS_BLKSIZ - (int)erofs_blkoff(*offset), len - i);
162 		blk = erofs_blknr(*offset);
163 
164 		if (!page || page->index != blk) {
165 			if (page) {
166 				kunmap(page);
167 				unlock_page(page);
168 				put_page(page);
169 			}
170 			page = erofs_get_meta_page(sb, blk);
171 			if (IS_ERR(page)) {
172 				kfree(buffer);
173 				goto err_nullpage;
174 			}
175 			ptr = kmap(page);
176 		}
177 		memcpy(buffer + i, ptr + erofs_blkoff(*offset), cnt);
178 		*offset += cnt;
179 	}
180 out:
181 	kunmap(page);
182 	*pagep = page;
183 	return buffer;
184 err_nullpage:
185 	*pagep = NULL;
186 	return page;
187 }
188 
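/*
 * Load the on-disk compression configurations that follow the (extended)
 * superblock: one variable-sized record per algorithm bit set in
 * available_compr_algs (currently LZ4 and LZMA).
 */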
189 static int erofs_load_compr_cfgs(struct super_block *sb,
190 				 struct erofs_super_block *dsb)
191 {
192 	struct erofs_sb_info *sbi;
193 	struct page *page;
194 	unsigned int algs, alg;
195 	erofs_off_t offset;
196 	int size, ret;
197 
198 	sbi = EROFS_SB(sb);
199 	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
200 
201 	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
202 		erofs_err(sb, "try to load compressed fs with unsupported algorithms %x",
203 			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
204 		return -EINVAL;
205 	}
206 
207 	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
208 	page = NULL;
209 	alg = 0;
210 	ret = 0;
211 
212 	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
213 		void *data;
214 
215 		if (!(algs & 1))
216 			continue;
217 
218 		data = erofs_read_metadata(sb, &page, &offset, &size);
219 		if (IS_ERR(data)) {
220 			ret = PTR_ERR(data);
221 			goto err;
222 		}
223 
224 		switch (alg) {
225 		case Z_EROFS_COMPRESSION_LZ4:
226 			ret = z_erofs_load_lz4_config(sb, dsb, data, size);
227 			break;
228 		case Z_EROFS_COMPRESSION_LZMA:
229 			ret = z_erofs_load_lzma_config(sb, dsb, data, size);
230 			break;
231 		default:
232 			DBG_BUGON(1);
233 			ret = -EFAULT;
234 		}
235 		kfree(data);
236 		if (ret)
237 			goto err;
238 	}
239 err:
240 	if (page) {
241 		unlock_page(page);
242 		put_page(page);
243 	}
244 	return ret;
245 }
246 #else
247 static int erofs_load_compr_cfgs(struct super_block *sb,
248 				 struct erofs_super_block *dsb)
249 {
250 	if (dsb->u1.available_compr_algs) {
251 		erofs_err(sb, "try to load compressed fs when compression is disabled");
252 		return -EINVAL;
253 	}
254 	return 0;
255 }
256 #endif
257 
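/*
 * Set up extra blob devices for multi-device images: the number of
 * "device=" mount options must match dsb->extra_devices; each on-disk
 * device slot is then read and the corresponding block device opened
 * by the user-supplied path.
 */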
258 static int erofs_init_devices(struct super_block *sb,
259 			      struct erofs_super_block *dsb)
260 {
261 	struct erofs_sb_info *sbi = EROFS_SB(sb);
262 	unsigned int ondisk_extradevs;
263 	erofs_off_t pos;
264 	struct page *page = NULL;
265 	struct erofs_device_info *dif;
266 	struct erofs_deviceslot *dis;
267 	void *ptr;
268 	int id, err = 0;
269 
270 	sbi->total_blocks = sbi->primarydevice_blocks;
271 	if (!erofs_sb_has_device_table(sbi))
272 		ondisk_extradevs = 0;
273 	else
274 		ondisk_extradevs = le16_to_cpu(dsb->extra_devices);
275 
276 	if (ondisk_extradevs != sbi->devs->extra_devices) {
277 		erofs_err(sb, "extra devices don't match (ondisk %u, given %u)",
278 			  ondisk_extradevs, sbi->devs->extra_devices);
279 		return -EINVAL;
280 	}
281 	if (!ondisk_extradevs)
282 		return 0;
283 
284 	sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1;
285 	pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE;
286 	down_read(&sbi->devs->rwsem);
287 	idr_for_each_entry(&sbi->devs->tree, dif, id) {
288 		erofs_blk_t blk = erofs_blknr(pos);
289 		struct block_device *bdev;
290 
291 		if (!page || page->index != blk) {
292 			if (page) {
293 				kunmap(page);
294 				unlock_page(page);
295 				put_page(page);
296 			}
297 
298 			page = erofs_get_meta_page(sb, blk);
299 			if (IS_ERR(page)) {
300 				up_read(&sbi->devs->rwsem);
301 				return PTR_ERR(page);
302 			}
303 			ptr = kmap(page);
304 		}
305 		dis = ptr + erofs_blkoff(pos);
306 
307 		bdev = blkdev_get_by_path(dif->path,
308 					  FMODE_READ | FMODE_EXCL,
309 					  sb->s_type);
310 		if (IS_ERR(bdev)) {
311 			err = PTR_ERR(bdev);
312 			goto err_out;
313 		}
314 		dif->bdev = bdev;
315 		dif->dax_dev = fs_dax_get_by_bdev(bdev);
316 		dif->blocks = le32_to_cpu(dis->blocks);
317 		dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
318 		sbi->total_blocks += dif->blocks;
319 		pos += EROFS_DEVT_SLOT_SIZE;
320 	}
321 err_out:
322 	up_read(&sbi->devs->rwsem);
323 	if (page) {
324 		kunmap(page);
325 		unlock_page(page);
326 		put_page(page);
327 	}
328 	return err;
329 }
330 
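/*
 * Read block 0 of the primary device and parse the superblock at
 * EROFS_SUPER_OFFSET: magic, optional checksum, block size, incompatible
 * features, volume metadata, compression configs and the device table.
 */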
331 static int erofs_read_superblock(struct super_block *sb)
332 {
333 	struct erofs_sb_info *sbi;
334 	struct page *page;
335 	struct erofs_super_block *dsb;
336 	unsigned int blkszbits;
337 	void *data;
338 	int ret;
339 
340 	page = read_mapping_page(sb->s_bdev->bd_inode->i_mapping, 0, NULL);
341 	if (IS_ERR(page)) {
342 		erofs_err(sb, "cannot read erofs superblock");
343 		return PTR_ERR(page);
344 	}
345 
346 	sbi = EROFS_SB(sb);
347 
348 	data = kmap(page);
349 	dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);
350 
351 	ret = -EINVAL;
352 	if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
353 		erofs_err(sb, "cannot find valid erofs superblock");
354 		goto out;
355 	}
356 
357 	sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
358 	if (erofs_sb_has_sb_chksum(sbi)) {
359 		ret = erofs_superblock_csum_verify(sb, data);
360 		if (ret)
361 			goto out;
362 	}
363 
364 	ret = -EINVAL;
365 	blkszbits = dsb->blkszbits;
366 	/* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
367 	if (blkszbits != LOG_BLOCK_SIZE) {
368 		erofs_err(sb, "blkszbits %u isn't supported on this platform",
369 			  blkszbits);
370 		goto out;
371 	}
372 
373 	if (!check_layout_compatibility(sb, dsb))
374 		goto out;
375 
376 	sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
377 	if (sbi->sb_size > EROFS_BLKSIZ) {
378 		erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
379 			  sbi->sb_size);
380 		goto out;
381 	}
382 	sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
383 	sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
384 #ifdef CONFIG_EROFS_FS_XATTR
385 	sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
386 #endif
387 	sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
388 	sbi->root_nid = le16_to_cpu(dsb->root_nid);
389 	sbi->inos = le64_to_cpu(dsb->inos);
390 
391 	sbi->build_time = le64_to_cpu(dsb->build_time);
392 	sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);
393 
394 	memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));
395 
396 	ret = strscpy(sbi->volume_name, dsb->volume_name,
397 		      sizeof(dsb->volume_name));
398 	if (ret < 0) {	/* -E2BIG */
399 		erofs_err(sb, "bad volume name without NIL terminator");
400 		ret = -EFSCORRUPTED;
401 		goto out;
402 	}
403 
404 	/* parse on-disk compression configurations */
405 	if (erofs_sb_has_compr_cfgs(sbi))
406 		ret = erofs_load_compr_cfgs(sb, dsb);
407 	else
408 		ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0);
409 	if (ret < 0)
410 		goto out;
411 
412 	/* handle multiple devices */
413 	ret = erofs_init_devices(sb, dsb);
414 out:
415 	kunmap(page);
416 	put_page(page);
417 	return ret;
418 }
419 
420 /* set up default EROFS parameters */
421 static void erofs_default_options(struct erofs_fs_context *ctx)
422 {
423 #ifdef CONFIG_EROFS_FS_ZIP
424 	ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
425 	ctx->opt.max_sync_decompress_pages = 3;
426 	ctx->opt.readahead_sync_decompress = false;
427 #endif
428 #ifdef CONFIG_EROFS_FS_XATTR
429 	set_opt(&ctx->opt, XATTR_USER);
430 #endif
431 #ifdef CONFIG_EROFS_FS_POSIX_ACL
432 	set_opt(&ctx->opt, POSIX_ACL);
433 #endif
434 }
435 
436 enum {
437 	Opt_user_xattr,
438 	Opt_acl,
439 	Opt_cache_strategy,
440 	Opt_dax,
441 	Opt_dax_enum,
442 	Opt_device,
443 	Opt_err
444 };
445 
446 static const struct constant_table erofs_param_cache_strategy[] = {
447 	{"disabled",	EROFS_ZIP_CACHE_DISABLED},
448 	{"readahead",	EROFS_ZIP_CACHE_READAHEAD},
449 	{"readaround",	EROFS_ZIP_CACHE_READAROUND},
450 	{}
451 };
452 
453 static const struct constant_table erofs_dax_param_enums[] = {
454 	{"always",	EROFS_MOUNT_DAX_ALWAYS},
455 	{"never",	EROFS_MOUNT_DAX_NEVER},
456 	{}
457 };
458 
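/*
 * Mount options: {,no}user_xattr, {,no}acl,
 * cache_strategy={disabled,readahead,readaround}, dax[={always,never}] and
 * device=<path> (which may be given multiple times for multi-device images).
 * Illustrative example (device paths made up):
 *   mount -t erofs -o cache_strategy=readaround,device=/dev/blob1 /dev/sda /mnt
 */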
459 static const struct fs_parameter_spec erofs_fs_parameters[] = {
460 	fsparam_flag_no("user_xattr",	Opt_user_xattr),
461 	fsparam_flag_no("acl",		Opt_acl),
462 	fsparam_enum("cache_strategy",	Opt_cache_strategy,
463 		     erofs_param_cache_strategy),
464 	fsparam_flag("dax",             Opt_dax),
465 	fsparam_enum("dax",		Opt_dax_enum, erofs_dax_param_enums),
466 	fsparam_string("device",	Opt_device),
467 	{}
468 };
469 
470 static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
471 {
472 #ifdef CONFIG_FS_DAX
473 	struct erofs_fs_context *ctx = fc->fs_private;
474 
475 	switch (mode) {
476 	case EROFS_MOUNT_DAX_ALWAYS:
477 		warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
478 		set_opt(&ctx->opt, DAX_ALWAYS);
479 		clear_opt(&ctx->opt, DAX_NEVER);
480 		return true;
481 	case EROFS_MOUNT_DAX_NEVER:
482 		set_opt(&ctx->opt, DAX_NEVER);
483 		clear_opt(&ctx->opt, DAX_ALWAYS);
484 		return true;
485 	default:
486 		DBG_BUGON(1);
487 		return false;
488 	}
489 #else
490 	errorfc(fc, "dax options not supported");
491 	return false;
492 #endif
493 }
494 
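/*
 * Parse one mount parameter into the fs_context.  "device=" paths are
 * duplicated and stashed in the per-context device idr so that
 * erofs_init_devices() can open them later during fill_super.
 */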
495 static int erofs_fc_parse_param(struct fs_context *fc,
496 				struct fs_parameter *param)
497 {
498 	struct erofs_fs_context *ctx = fc->fs_private;
499 	struct fs_parse_result result;
500 	struct erofs_device_info *dif;
501 	int opt, ret;
502 
503 	opt = fs_parse(fc, erofs_fs_parameters, param, &result);
504 	if (opt < 0)
505 		return opt;
506 
507 	switch (opt) {
508 	case Opt_user_xattr:
509 #ifdef CONFIG_EROFS_FS_XATTR
510 		if (result.boolean)
511 			set_opt(&ctx->opt, XATTR_USER);
512 		else
513 			clear_opt(&ctx->opt, XATTR_USER);
514 #else
515 		errorfc(fc, "{,no}user_xattr options not supported");
516 #endif
517 		break;
518 	case Opt_acl:
519 #ifdef CONFIG_EROFS_FS_POSIX_ACL
520 		if (result.boolean)
521 			set_opt(&ctx->opt, POSIX_ACL);
522 		else
523 			clear_opt(&ctx->opt, POSIX_ACL);
524 #else
525 		errorfc(fc, "{,no}acl options not supported");
526 #endif
527 		break;
528 	case Opt_cache_strategy:
529 #ifdef CONFIG_EROFS_FS_ZIP
530 		ctx->opt.cache_strategy = result.uint_32;
531 #else
532 		errorfc(fc, "compression not supported, cache_strategy ignored");
533 #endif
534 		break;
535 	case Opt_dax:
536 		if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS))
537 			return -EINVAL;
538 		break;
539 	case Opt_dax_enum:
540 		if (!erofs_fc_set_dax_mode(fc, result.uint_32))
541 			return -EINVAL;
542 		break;
543 	case Opt_device:
544 		dif = kzalloc(sizeof(*dif), GFP_KERNEL);
545 		if (!dif)
546 			return -ENOMEM;
547 		dif->path = kstrdup(param->string, GFP_KERNEL);
548 		if (!dif->path) {
549 			kfree(dif);
550 			return -ENOMEM;
551 		}
552 		down_write(&ctx->devs->rwsem);
553 		ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
554 		up_write(&ctx->devs->rwsem);
555 		if (ret < 0) {
556 			kfree(dif->path);
557 			kfree(dif);
558 			return ret;
559 		}
560 		++ctx->devs->extra_devices;
561 		break;
562 	default:
563 		return -ENOPARAM;
564 	}
565 	return 0;
566 }
567 
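/*
 * The managed cache is an internal inode whose page cache keeps cached
 * compressed data; ->releasepage asks erofs_try_to_free_cached_page()
 * whether a page may be dropped under memory pressure.
 */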
568 #ifdef CONFIG_EROFS_FS_ZIP
569 static const struct address_space_operations managed_cache_aops;
570 
571 static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
572 {
573 	int ret = 1;	/* 0 - busy */
574 	struct address_space *const mapping = page->mapping;
575 
576 	DBG_BUGON(!PageLocked(page));
577 	DBG_BUGON(mapping->a_ops != &managed_cache_aops);
578 
579 	if (PagePrivate(page))
580 		ret = erofs_try_to_free_cached_page(page);
581 
582 	return ret;
583 }
584 
585 static void erofs_managed_cache_invalidatepage(struct page *page,
586 					       unsigned int offset,
587 					       unsigned int length)
588 {
589 	const unsigned int stop = length + offset;
590 
591 	DBG_BUGON(!PageLocked(page));
592 
593 	/* Check for potential overflow in debug mode */
594 	DBG_BUGON(stop > PAGE_SIZE || stop < length);
595 
596 	if (offset == 0 && stop == PAGE_SIZE)
597 		while (!erofs_managed_cache_releasepage(page, GFP_NOFS))
598 			cond_resched();
599 }
600 
601 static const struct address_space_operations managed_cache_aops = {
602 	.releasepage = erofs_managed_cache_releasepage,
603 	.invalidatepage = erofs_managed_cache_invalidatepage,
604 };
605 
606 static int erofs_init_managed_cache(struct super_block *sb)
607 {
608 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
609 	struct inode *const inode = new_inode(sb);
610 
611 	if (!inode)
612 		return -ENOMEM;
613 
614 	set_nlink(inode, 1);
615 	inode->i_size = OFFSET_MAX;
616 
617 	inode->i_mapping->a_ops = &managed_cache_aops;
618 	mapping_set_gfp_mask(inode->i_mapping,
619 			     GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
620 	sbi->managed_cache = inode;
621 	return 0;
622 }
623 #else
624 static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
625 #endif
626 
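/*
 * Fill a new superblock: set the fixed block size, allocate erofs_sb_info,
 * take over the parsed options and device list from the fs_context, read
 * the on-disk superblock, then set up the root inode, shrinker and managed
 * cache.  EROFS is always mounted read-only.
 */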
627 static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
628 {
629 	struct inode *inode;
630 	struct erofs_sb_info *sbi;
631 	struct erofs_fs_context *ctx = fc->fs_private;
632 	int err;
633 
634 	sb->s_magic = EROFS_SUPER_MAGIC;
635 
636 	if (!sb_set_blocksize(sb, EROFS_BLKSIZ)) {
637 		erofs_err(sb, "failed to set erofs blksize");
638 		return -EINVAL;
639 	}
640 
641 	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
642 	if (!sbi)
643 		return -ENOMEM;
644 
645 	sb->s_fs_info = sbi;
646 	sbi->opt = ctx->opt;
647 	sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
648 	sbi->devs = ctx->devs;
649 	ctx->devs = NULL;
650 
651 	err = erofs_read_superblock(sb);
652 	if (err)
653 		return err;
654 
655 	if (test_opt(&sbi->opt, DAX_ALWAYS) &&
656 	    !dax_supported(sbi->dax_dev, sb->s_bdev, EROFS_BLKSIZ, 0, bdev_nr_sectors(sb->s_bdev))) {
657 		errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
658 		clear_opt(&sbi->opt, DAX_ALWAYS);
659 	}
660 	sb->s_flags |= SB_RDONLY | SB_NOATIME;
661 	sb->s_maxbytes = MAX_LFS_FILESIZE;
662 	sb->s_time_gran = 1;
663 
664 	sb->s_op = &erofs_sops;
665 	sb->s_xattr = erofs_xattr_handlers;
666 
667 	if (test_opt(&sbi->opt, POSIX_ACL))
668 		sb->s_flags |= SB_POSIXACL;
669 	else
670 		sb->s_flags &= ~SB_POSIXACL;
671 
672 #ifdef CONFIG_EROFS_FS_ZIP
673 	xa_init(&sbi->managed_pslots);
674 #endif
675 
676 	/* get the root inode */
677 	inode = erofs_iget(sb, ROOT_NID(sbi), true);
678 	if (IS_ERR(inode))
679 		return PTR_ERR(inode);
680 
681 	if (!S_ISDIR(inode->i_mode)) {
682 		erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
683 			  ROOT_NID(sbi), inode->i_mode);
684 		iput(inode);
685 		return -EINVAL;
686 	}
687 
688 	sb->s_root = d_make_root(inode);
689 	if (!sb->s_root)
690 		return -ENOMEM;
691 
692 	erofs_shrinker_register(sb);
693 	/* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
694 	err = erofs_init_managed_cache(sb);
695 	if (err)
696 		return err;
697 
698 	erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi));
699 	return 0;
700 }
701 
702 static int erofs_fc_get_tree(struct fs_context *fc)
703 {
704 	return get_tree_bdev(fc, erofs_fc_fill_super);
705 }
706 
707 static int erofs_fc_reconfigure(struct fs_context *fc)
708 {
709 	struct super_block *sb = fc->root->d_sb;
710 	struct erofs_sb_info *sbi = EROFS_SB(sb);
711 	struct erofs_fs_context *ctx = fc->fs_private;
712 
713 	DBG_BUGON(!sb_rdonly(sb));
714 
715 	if (test_opt(&ctx->opt, POSIX_ACL))
716 		fc->sb_flags |= SB_POSIXACL;
717 	else
718 		fc->sb_flags &= ~SB_POSIXACL;
719 
720 	sbi->opt = ctx->opt;
721 
722 	fc->sb_flags |= SB_RDONLY;
723 	return 0;
724 }
725 
726 static int erofs_release_device_info(int id, void *ptr, void *data)
727 {
728 	struct erofs_device_info *dif = ptr;
729 
730 	fs_put_dax(dif->dax_dev);
731 	if (dif->bdev)
732 		blkdev_put(dif->bdev, FMODE_READ | FMODE_EXCL);
733 	kfree(dif->path);
734 	kfree(dif);
735 	return 0;
736 }
737 
738 static void erofs_free_dev_context(struct erofs_dev_context *devs)
739 {
740 	if (!devs)
741 		return;
742 	idr_for_each(&devs->tree, &erofs_release_device_info, NULL);
743 	idr_destroy(&devs->tree);
744 	kfree(devs);
745 }
746 
747 static void erofs_fc_free(struct fs_context *fc)
748 {
749 	struct erofs_fs_context *ctx = fc->fs_private;
750 
751 	erofs_free_dev_context(ctx->devs);
752 	kfree(ctx);
753 }
754 
755 static const struct fs_context_operations erofs_context_ops = {
756 	.parse_param	= erofs_fc_parse_param,
757 	.get_tree       = erofs_fc_get_tree,
758 	.reconfigure    = erofs_fc_reconfigure,
759 	.free		= erofs_fc_free,
760 };
761 
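/*
 * Allocate the fs_context private data (mount options plus the extra
 * device list) and preset the default options.
 */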
762 static int erofs_init_fs_context(struct fs_context *fc)
763 {
764 	struct erofs_fs_context *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
765 
766 	if (!ctx)
767 		return -ENOMEM;
768 	ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
769 	if (!ctx->devs) {
770 		kfree(ctx);
771 		return -ENOMEM;
772 	}
773 	fc->fs_private = ctx;
774 
775 	idr_init(&ctx->devs->tree);
776 	init_rwsem(&ctx->devs->rwsem);
777 	erofs_default_options(ctx);
778 	fc->ops = &erofs_context_ops;
779 	return 0;
780 }
781 
782 /*
783  * could be triggered after deactivate_locked_super() is called, so this
784  * must handle both a normal umount and a superblock that failed to set up.
785  */
786 static void erofs_kill_sb(struct super_block *sb)
787 {
788 	struct erofs_sb_info *sbi;
789 
790 	WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);
791 
792 	kill_block_super(sb);
793 
794 	sbi = EROFS_SB(sb);
795 	if (!sbi)
796 		return;
797 
798 	erofs_free_dev_context(sbi->devs);
799 	fs_put_dax(sbi->dax_dev);
800 	kfree(sbi);
801 	sb->s_fs_info = NULL;
802 }
803 
804 /* called when ->s_root is non-NULL */
805 static void erofs_put_super(struct super_block *sb)
806 {
807 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
808 
809 	DBG_BUGON(!sbi);
810 
811 	erofs_shrinker_unregister(sb);
812 #ifdef CONFIG_EROFS_FS_ZIP
813 	iput(sbi->managed_cache);
814 	sbi->managed_cache = NULL;
815 #endif
816 }
817 
818 static struct file_system_type erofs_fs_type = {
819 	.owner          = THIS_MODULE,
820 	.name           = "erofs",
821 	.init_fs_context = erofs_init_fs_context,
822 	.kill_sb        = erofs_kill_sb,
823 	.fs_flags       = FS_REQUIRES_DEV,
824 };
825 MODULE_ALIAS_FS("erofs");
826 
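/*
 * Module init: create the inode cache, then bring up the shrinker, LZMA
 * support, per-CPU buffers and the zip subsystem, and finally register the
 * filesystem; on failure, everything already set up is unwound in reverse.
 */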
827 static int __init erofs_module_init(void)
828 {
829 	int err;
830 
831 	erofs_check_ondisk_layout_definitions();
832 
833 	erofs_inode_cachep = kmem_cache_create("erofs_inode",
834 					       sizeof(struct erofs_inode), 0,
835 					       SLAB_RECLAIM_ACCOUNT,
836 					       erofs_inode_init_once);
837 	if (!erofs_inode_cachep) {
838 		err = -ENOMEM;
839 		goto icache_err;
840 	}
841 
842 	err = erofs_init_shrinker();
843 	if (err)
844 		goto shrinker_err;
845 
846 	err = z_erofs_lzma_init();
847 	if (err)
848 		goto lzma_err;
849 
850 	erofs_pcpubuf_init();
851 	err = z_erofs_init_zip_subsystem();
852 	if (err)
853 		goto zip_err;
854 
855 	err = register_filesystem(&erofs_fs_type);
856 	if (err)
857 		goto fs_err;
858 
859 	return 0;
860 
861 fs_err:
862 	z_erofs_exit_zip_subsystem();
863 zip_err:
864 	z_erofs_lzma_exit();
865 lzma_err:
866 	erofs_exit_shrinker();
867 shrinker_err:
868 	kmem_cache_destroy(erofs_inode_cachep);
869 icache_err:
870 	return err;
871 }
872 
873 static void __exit erofs_module_exit(void)
874 {
875 	unregister_filesystem(&erofs_fs_type);
876 
877 	/* Ensure all RCU free inodes / pclusters are safe to be destroyed. */
878 	rcu_barrier();
879 
880 	z_erofs_exit_zip_subsystem();
881 	z_erofs_lzma_exit();
882 	erofs_exit_shrinker();
883 	kmem_cache_destroy(erofs_inode_cachep);
884 	erofs_pcpubuf_exit();
885 }
886 
887 /* get filesystem statistics */
888 static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
889 {
890 	struct super_block *sb = dentry->d_sb;
891 	struct erofs_sb_info *sbi = EROFS_SB(sb);
892 	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
893 
894 	buf->f_type = sb->s_magic;
895 	buf->f_bsize = EROFS_BLKSIZ;
896 	buf->f_blocks = sbi->total_blocks;
897 	buf->f_bfree = buf->f_bavail = 0;
898 
899 	buf->f_files = ULLONG_MAX;
900 	buf->f_ffree = ULLONG_MAX - sbi->inos;
901 
902 	buf->f_namelen = EROFS_NAME_LEN;
903 
904 	buf->f_fsid    = u64_to_fsid(id);
905 	return 0;
906 }
907 
908 static int erofs_show_options(struct seq_file *seq, struct dentry *root)
909 {
910 	struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
911 	struct erofs_mount_opts *opt = &sbi->opt;
912 
913 #ifdef CONFIG_EROFS_FS_XATTR
914 	if (test_opt(opt, XATTR_USER))
915 		seq_puts(seq, ",user_xattr");
916 	else
917 		seq_puts(seq, ",nouser_xattr");
918 #endif
919 #ifdef CONFIG_EROFS_FS_POSIX_ACL
920 	if (test_opt(opt, POSIX_ACL))
921 		seq_puts(seq, ",acl");
922 	else
923 		seq_puts(seq, ",noacl");
924 #endif
925 #ifdef CONFIG_EROFS_FS_ZIP
926 	if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
927 		seq_puts(seq, ",cache_strategy=disabled");
928 	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
929 		seq_puts(seq, ",cache_strategy=readahead");
930 	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
931 		seq_puts(seq, ",cache_strategy=readaround");
932 #endif
933 	if (test_opt(opt, DAX_ALWAYS))
934 		seq_puts(seq, ",dax=always");
935 	if (test_opt(opt, DAX_NEVER))
936 		seq_puts(seq, ",dax=never");
937 	return 0;
938 }
939 
940 const struct super_operations erofs_sops = {
941 	.put_super = erofs_put_super,
942 	.alloc_inode = erofs_alloc_inode,
943 	.free_inode = erofs_free_inode,
944 	.statfs = erofs_statfs,
945 	.show_options = erofs_show_options,
946 };
947 
948 module_init(erofs_module_init);
949 module_exit(erofs_module_exit);
950 
951 MODULE_DESCRIPTION("Enhanced ROM File System");
952 MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
953 MODULE_LICENSE("GPL");
954