xref: /linux/fs/erofs/super.c (revision 2363088eba2ecccfb643725e4864af73c4226a04)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include <linux/module.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/crc32c.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/dax.h>
#include <linux/exportfs.h>
#include "xattr.h"

#define CREATE_TRACE_POINTS
#include <trace/events/erofs.h>

static struct kmem_cache *erofs_inode_cachep __read_mostly;
struct file_system_type erofs_fs_type;

void _erofs_err(struct super_block *sb, const char *function,
		const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_err("(device %s): %s: %pV", sb->s_id, function, &vaf);
	va_end(args);
}

void _erofs_info(struct super_block *sb, const char *function,
		 const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_info("(device %s): %pV", sb->s_id, &vaf);
	va_end(args);
}

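/*
 * The on-disk superblock lives at EROFS_SUPER_OFFSET within the first
 * filesystem block, so the checksum covers everything from that offset
 * up to the end of the block, computed via crc32c seeded with ~0 and
 * with the on-disk checksum field itself zeroed out.
 */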
static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
	size_t len = 1 << EROFS_SB(sb)->blkszbits;
	struct erofs_super_block *dsb;
	u32 expected_crc, crc;

	if (len > EROFS_SUPER_OFFSET)
		len -= EROFS_SUPER_OFFSET;

	dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET, len, GFP_KERNEL);
	if (!dsb)
		return -ENOMEM;

	expected_crc = le32_to_cpu(dsb->checksum);
	dsb->checksum = 0;
	/* EROFS_SUPER_OFFSET leaves room for x86 boot sectors and other oddities */
	crc = crc32c(~0, dsb, len);
	kfree(dsb);

	if (crc != expected_crc) {
		erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
			  crc, expected_crc);
		return -EBADMSG;
	}
	return 0;
}

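/*
 * Slab constructor: it only runs when a slab object is first created,
 * not on every allocation, so just initialize the embedded VFS inode
 * here; the remaining fields are zeroed per-allocation in
 * erofs_alloc_inode().
 */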
static void erofs_inode_init_once(void *ptr)
{
	struct erofs_inode *vi = ptr;

	inode_init_once(&vi->vfs_inode);
}

static struct inode *erofs_alloc_inode(struct super_block *sb)
{
	struct erofs_inode *vi =
		alloc_inode_sb(sb, erofs_inode_cachep, GFP_KERNEL);

	if (!vi)
		return NULL;

	/* zero out everything except vfs_inode */
	memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
	return &vi->vfs_inode;
}

static void erofs_free_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);

	/*
	 * Be careful of the RCU symlink path: fast symlinks keep their
	 * target string in a separately kmalloc'ed i_link buffer.
	 */
	if (inode->i_op == &erofs_fast_symlink_iops)
		kfree(inode->i_link);
	kfree(vi->xattr_shared_xattrs);

	kmem_cache_free(erofs_inode_cachep, vi);
}

static bool check_layout_compatibility(struct super_block *sb,
				       struct erofs_super_block *dsb)
{
	const unsigned int feature = le32_to_cpu(dsb->feature_incompat);

	EROFS_SB(sb)->feature_incompat = feature;

	/* check if current kernel meets all mandatory requirements */
	if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
		erofs_err(sb,
			  "unidentified incompatible feature %x, please upgrade kernel version",
			   feature & ~EROFS_ALL_FEATURE_INCOMPAT);
		return false;
	}
	return true;
}

/* read variable-sized metadata; *offset is first rounded up to a 4-byte boundary */
void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
			  erofs_off_t *offset, int *lengthp)
{
	u8 *buffer, *ptr;
	int len, i, cnt;

	*offset = round_up(*offset, 4);
	ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
	if (IS_ERR(ptr))
		return ptr;

	len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(sb, *offset)]);
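	/* a zero length field encodes the maximum size, U16_MAX + 1 */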
	if (!len)
		len = U16_MAX + 1;
	buffer = kmalloc(len, GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);
	*offset += sizeof(__le16);
	*lengthp = len;

	for (i = 0; i < len; i += cnt) {
		cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset),
			    len - i);
		ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
		if (IS_ERR(ptr)) {
			kfree(buffer);
			return ptr;
		}
		memcpy(buffer + i, ptr + erofs_blkoff(sb, *offset), cnt);
		*offset += cnt;
	}
	return buffer;
}

#ifdef CONFIG_EROFS_FS_ZIP
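/*
 * Per-algorithm configurations are laid out right after the (extended)
 * superblock as variable-sized records, one for each bit set in
 * available_compr_algs, in ascending algorithm-bit order.
 */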
static int erofs_load_compr_cfgs(struct super_block *sb,
				 struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	unsigned int algs, alg;
	erofs_off_t offset;
	int size, ret = 0;

	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
		erofs_err(sb, "try to load compressed fs with unsupported algorithms %x",
			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
		return -EINVAL;
	}

	erofs_init_metabuf(&buf, sb);
	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
	alg = 0;
	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
		void *data;

		if (!(algs & 1))
			continue;

		data = erofs_read_metadata(sb, &buf, &offset, &size);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		switch (alg) {
		case Z_EROFS_COMPRESSION_LZ4:
			ret = z_erofs_load_lz4_config(sb, dsb, data, size);
			break;
		case Z_EROFS_COMPRESSION_LZMA:
			ret = z_erofs_load_lzma_config(sb, dsb, data, size);
			break;
		default:
			DBG_BUGON(1);
			ret = -EFAULT;
		}
		kfree(data);
		if (ret)
			break;
	}
	erofs_put_metabuf(&buf);
	return ret;
}
#else
static int erofs_load_compr_cfgs(struct super_block *sb,
				 struct erofs_super_block *dsb)
{
	if (dsb->u1.available_compr_algs) {
		erofs_err(sb, "try to load compressed fs when compression is disabled");
		return -EINVAL;
	}
	return 0;
}
#endif

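/*
 * Parse a single on-disk device slot.  If no path was supplied via the
 * "device" mount option, fall back to the tag stored in the slot, then
 * either register an fscache cookie or open the backing block device,
 * depending on the mount mode.
 */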
static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
			     struct erofs_device_info *dif, erofs_off_t *pos)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_fscache *fscache;
	struct erofs_deviceslot *dis;
	struct block_device *bdev;
	void *ptr;

	ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	dis = ptr + erofs_blkoff(sb, *pos);

	if (!dif->path) {
		if (!dis->tag[0]) {
			erofs_err(sb, "empty device tag @ pos %llu", *pos);
			return -EINVAL;
		}
		dif->path = kmemdup_nul(dis->tag, sizeof(dis->tag), GFP_KERNEL);
		if (!dif->path)
			return -ENOMEM;
	}

	if (erofs_is_fscache_mode(sb)) {
		fscache = erofs_fscache_register_cookie(sb, dif->path, 0);
		if (IS_ERR(fscache))
			return PTR_ERR(fscache);
		dif->fscache = fscache;
	} else if (!sbi->devs->flatdev) {
		bdev = blkdev_get_by_path(dif->path, BLK_OPEN_READ, sb->s_type,
					  NULL);
		if (IS_ERR(bdev))
			return PTR_ERR(bdev);
		dif->bdev = bdev;
		dif->dax_dev = fs_dax_get_by_bdev(bdev, &dif->dax_part_off,
						  NULL, NULL);
	}

	dif->blocks = le32_to_cpu(dis->blocks);
	dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
	sbi->total_blocks += dif->blocks;
	*pos += EROFS_DEVT_SLOT_SIZE;
	return 0;
}

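/*
 * Walk the on-disk device table.  Extra devices passed in via the
 * "device" mount option must match the on-disk count; otherwise the
 * table alone is used to discover and register the extra devices.
 */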
static int erofs_scan_devices(struct super_block *sb,
			      struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	unsigned int ondisk_extradevs;
	erofs_off_t pos;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct erofs_device_info *dif;
	int id, err = 0;

	sbi->total_blocks = sbi->primarydevice_blocks;
	if (!erofs_sb_has_device_table(sbi))
		ondisk_extradevs = 0;
	else
		ondisk_extradevs = le16_to_cpu(dsb->extra_devices);

	if (sbi->devs->extra_devices &&
	    ondisk_extradevs != sbi->devs->extra_devices) {
		erofs_err(sb, "extra devices don't match (ondisk %u, given %u)",
			  ondisk_extradevs, sbi->devs->extra_devices);
		return -EINVAL;
	}
	if (!ondisk_extradevs)
		return 0;

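	/*
	 * Note: with no extra devices given at mount time and fscache
	 * unused, the image is treated as a single flat device, i.e.
	 * all device data is addressed on the primary block device.
	 */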
	if (!sbi->devs->extra_devices && !erofs_is_fscache_mode(sb))
		sbi->devs->flatdev = true;

	sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1;
	pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE;
	down_read(&sbi->devs->rwsem);
	if (sbi->devs->extra_devices) {
		idr_for_each_entry(&sbi->devs->tree, dif, id) {
			err = erofs_init_device(&buf, sb, dif, &pos);
			if (err)
				break;
		}
	} else {
		for (id = 0; id < ondisk_extradevs; id++) {
			dif = kzalloc(sizeof(*dif), GFP_KERNEL);
			if (!dif) {
				err = -ENOMEM;
				break;
			}

			err = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
			if (err < 0) {
				kfree(dif);
				break;
			}
			++sbi->devs->extra_devices;

			err = erofs_init_device(&buf, sb, dif, &pos);
			if (err)
				break;
		}
	}
	up_read(&sbi->devs->rwsem);
	erofs_put_metabuf(&buf);
	return err;
}

static int erofs_read_superblock(struct super_block *sb)
{
	struct erofs_sb_info *sbi;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct erofs_super_block *dsb;
	void *data;
	int ret;

	data = erofs_read_metabuf(&buf, sb, 0, EROFS_KMAP);
	if (IS_ERR(data)) {
		erofs_err(sb, "cannot read erofs superblock");
		return PTR_ERR(data);
	}

	sbi = EROFS_SB(sb);
	dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);

	ret = -EINVAL;
	if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
		erofs_err(sb, "cannot find valid erofs superblock");
		goto out;
	}

	sbi->blkszbits  = dsb->blkszbits;
	if (sbi->blkszbits < 9 || sbi->blkszbits > PAGE_SHIFT) {
		erofs_err(sb, "blkszbits %u isn't supported", sbi->blkszbits);
		goto out;
	}
	if (dsb->dirblkbits) {
		erofs_err(sb, "dirblkbits %u isn't supported", dsb->dirblkbits);
		goto out;
	}

	sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
	if (erofs_sb_has_sb_chksum(sbi)) {
		ret = erofs_superblock_csum_verify(sb, data);
		if (ret)
			goto out;
	}

	ret = -EINVAL;
	if (!check_layout_compatibility(sb, dsb))
		goto out;

	sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
	if (sbi->sb_size > PAGE_SIZE - EROFS_SUPER_OFFSET) {
		erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
			  sbi->sb_size);
		goto out;
	}
	sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
	sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
	sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
	sbi->xattr_prefix_start = le32_to_cpu(dsb->xattr_prefix_start);
	sbi->xattr_prefix_count = dsb->xattr_prefix_count;
#endif
	sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
	sbi->root_nid = le16_to_cpu(dsb->root_nid);
	sbi->packed_nid = le64_to_cpu(dsb->packed_nid);
	sbi->inos = le64_to_cpu(dsb->inos);

	sbi->build_time = le64_to_cpu(dsb->build_time);
	sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);

	memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));

	ret = strscpy(sbi->volume_name, dsb->volume_name,
		      sizeof(dsb->volume_name));
	if (ret < 0) {	/* -E2BIG */
		erofs_err(sb, "bad volume name without NIL terminator");
		ret = -EFSCORRUPTED;
		goto out;
	}

	/* parse on-disk compression configurations */
	if (erofs_sb_has_compr_cfgs(sbi))
		ret = erofs_load_compr_cfgs(sb, dsb);
	else
		ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0);
	if (ret < 0)
		goto out;

	/* handle multiple devices */
	ret = erofs_scan_devices(sb, dsb);

	if (erofs_is_fscache_mode(sb))
		erofs_info(sb, "EXPERIMENTAL fscache-based on-demand read feature in use. Use at your own risk!");
	if (erofs_sb_has_fragments(sbi))
		erofs_info(sb, "EXPERIMENTAL compressed fragments feature in use. Use at your own risk!");
	if (erofs_sb_has_dedupe(sbi))
		erofs_info(sb, "EXPERIMENTAL global deduplication feature in use. Use at your own risk!");
out:
	erofs_put_metabuf(&buf);
	return ret;
}

/* set up default EROFS parameters */
static void erofs_default_options(struct erofs_fs_context *ctx)
{
#ifdef CONFIG_EROFS_FS_ZIP
	ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
	ctx->opt.max_sync_decompress_pages = 3;
	ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
#endif
#ifdef CONFIG_EROFS_FS_XATTR
	set_opt(&ctx->opt, XATTR_USER);
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	set_opt(&ctx->opt, POSIX_ACL);
#endif
}

enum {
	Opt_user_xattr,
	Opt_acl,
	Opt_cache_strategy,
	Opt_dax,
	Opt_dax_enum,
	Opt_device,
	Opt_fsid,
	Opt_domain_id,
	Opt_err
};

static const struct constant_table erofs_param_cache_strategy[] = {
	{"disabled",	EROFS_ZIP_CACHE_DISABLED},
	{"readahead",	EROFS_ZIP_CACHE_READAHEAD},
	{"readaround",	EROFS_ZIP_CACHE_READAROUND},
	{}
};

static const struct constant_table erofs_dax_param_enums[] = {
	{"always",	EROFS_MOUNT_DAX_ALWAYS},
	{"never",	EROFS_MOUNT_DAX_NEVER},
	{}
};

static const struct fs_parameter_spec erofs_fs_parameters[] = {
	fsparam_flag_no("user_xattr",	Opt_user_xattr),
	fsparam_flag_no("acl",		Opt_acl),
	fsparam_enum("cache_strategy",	Opt_cache_strategy,
		     erofs_param_cache_strategy),
	fsparam_flag("dax",             Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, erofs_dax_param_enums),
	fsparam_string("device",	Opt_device),
	fsparam_string("fsid",		Opt_fsid),
	fsparam_string("domain_id",	Opt_domain_id),
	{}
};
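
/*
 * For reference, a multi-device mount using the parameters above might
 * look like the following (the paths are illustrative only):
 *
 *   mount -t erofs -o dax=always,device=/dev/vdb,device=/dev/vdc \
 *         /dev/vda /mnt/erofs
 */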

static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
{
#ifdef CONFIG_FS_DAX
	struct erofs_fs_context *ctx = fc->fs_private;

	switch (mode) {
	case EROFS_MOUNT_DAX_ALWAYS:
		warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
		set_opt(&ctx->opt, DAX_ALWAYS);
		clear_opt(&ctx->opt, DAX_NEVER);
		return true;
	case EROFS_MOUNT_DAX_NEVER:
		set_opt(&ctx->opt, DAX_NEVER);
		clear_opt(&ctx->opt, DAX_ALWAYS);
		return true;
	default:
		DBG_BUGON(1);
		return false;
	}
#else
	errorfc(fc, "dax options not supported");
	return false;
#endif
}

static int erofs_fc_parse_param(struct fs_context *fc,
				struct fs_parameter *param)
{
	struct erofs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	struct erofs_device_info *dif;
	int opt, ret;

	opt = fs_parse(fc, erofs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_user_xattr:
#ifdef CONFIG_EROFS_FS_XATTR
		if (result.boolean)
			set_opt(&ctx->opt, XATTR_USER);
		else
			clear_opt(&ctx->opt, XATTR_USER);
#else
		errorfc(fc, "{,no}user_xattr options not supported");
#endif
		break;
	case Opt_acl:
#ifdef CONFIG_EROFS_FS_POSIX_ACL
		if (result.boolean)
			set_opt(&ctx->opt, POSIX_ACL);
		else
			clear_opt(&ctx->opt, POSIX_ACL);
#else
		errorfc(fc, "{,no}acl options not supported");
#endif
		break;
	case Opt_cache_strategy:
#ifdef CONFIG_EROFS_FS_ZIP
		ctx->opt.cache_strategy = result.uint_32;
#else
		errorfc(fc, "compression not supported, cache_strategy ignored");
#endif
		break;
	case Opt_dax:
		if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS))
			return -EINVAL;
		break;
	case Opt_dax_enum:
		if (!erofs_fc_set_dax_mode(fc, result.uint_32))
			return -EINVAL;
		break;
	case Opt_device:
		dif = kzalloc(sizeof(*dif), GFP_KERNEL);
		if (!dif)
			return -ENOMEM;
		dif->path = kstrdup(param->string, GFP_KERNEL);
		if (!dif->path) {
			kfree(dif);
			return -ENOMEM;
		}
		down_write(&ctx->devs->rwsem);
		ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
		up_write(&ctx->devs->rwsem);
		if (ret < 0) {
			kfree(dif->path);
			kfree(dif);
			return ret;
		}
		++ctx->devs->extra_devices;
		break;
#ifdef CONFIG_EROFS_FS_ONDEMAND
	case Opt_fsid:
		kfree(ctx->fsid);
		ctx->fsid = kstrdup(param->string, GFP_KERNEL);
		if (!ctx->fsid)
			return -ENOMEM;
		break;
	case Opt_domain_id:
		kfree(ctx->domain_id);
		ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
		if (!ctx->domain_id)
			return -ENOMEM;
		break;
#else
	case Opt_fsid:
	case Opt_domain_id:
		errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
		break;
#endif
	default:
		return -ENOPARAM;
	}
	return 0;
}

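/*
 * NFS export support: file handles encode the nid directly as the
 * 64-bit inode number, so the generic helpers can be reused; the
 * generation number is ignored since the filesystem is immutable.
 */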
static struct inode *erofs_nfs_get_inode(struct super_block *sb,
					 u64 ino, u32 generation)
{
	return erofs_iget(sb, ino);
}

static struct dentry *erofs_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    erofs_nfs_get_inode);
}

static struct dentry *erofs_fh_to_parent(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    erofs_nfs_get_inode);
}

static struct dentry *erofs_get_parent(struct dentry *child)
{
	erofs_nid_t nid;
	unsigned int d_type;
	int err;

	err = erofs_namei(d_inode(child), &dotdot_name, &nid, &d_type);
	if (err)
		return ERR_PTR(err);
	return d_obtain_alias(erofs_iget(child->d_sb, nid));
}

static const struct export_operations erofs_export_ops = {
	.fh_to_dentry = erofs_fh_to_dentry,
	.fh_to_parent = erofs_fh_to_parent,
	.get_parent = erofs_get_parent,
};

static int erofs_fc_fill_pseudo_super(struct super_block *sb, struct fs_context *fc)
{
	static const struct tree_descr empty_descr = {""};

	return simple_fill_super(sb, EROFS_SUPER_MAGIC, &empty_descr);
}

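/*
 * Fill a new superblock.  The block size is provisionally set to
 * PAGE_SIZE so that the on-disk superblock can be read, and is switched
 * to the real on-disk block size further below (except in fscache mode,
 * which always works in page-sized units).
 */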
static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct erofs_sb_info *sbi;
	struct erofs_fs_context *ctx = fc->fs_private;
	int err;

	sb->s_magic = EROFS_SUPER_MAGIC;
	sb->s_flags |= SB_RDONLY | SB_NOATIME;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_op = &erofs_sops;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;
	sbi->opt = ctx->opt;
	sbi->devs = ctx->devs;
	ctx->devs = NULL;
	sbi->fsid = ctx->fsid;
	ctx->fsid = NULL;
	sbi->domain_id = ctx->domain_id;
	ctx->domain_id = NULL;

	sbi->blkszbits = PAGE_SHIFT;
	if (erofs_is_fscache_mode(sb)) {
		sb->s_blocksize = PAGE_SIZE;
		sb->s_blocksize_bits = PAGE_SHIFT;

		err = erofs_fscache_register_fs(sb);
		if (err)
			return err;

		err = super_setup_bdi(sb);
		if (err)
			return err;
	} else {
		if (!sb_set_blocksize(sb, PAGE_SIZE)) {
			errorfc(fc, "failed to set initial blksize");
			return -EINVAL;
		}

		sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
						  &sbi->dax_part_off,
						  NULL, NULL);
	}

	err = erofs_read_superblock(sb);
	if (err)
		return err;

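	/* switch from the bootstrap PAGE_SIZE block size to the real one */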
	if (sb->s_blocksize_bits != sbi->blkszbits) {
		if (erofs_is_fscache_mode(sb)) {
			errorfc(fc, "unsupported blksize for fscache mode");
			return -EINVAL;
		}
		if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) {
			errorfc(fc, "failed to set erofs blksize");
			return -EINVAL;
		}
	}

	if (test_opt(&sbi->opt, DAX_ALWAYS)) {
		if (!sbi->dax_dev) {
			errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
			clear_opt(&sbi->opt, DAX_ALWAYS);
		} else if (sbi->blkszbits != PAGE_SHIFT) {
			errorfc(fc, "unsupported blocksize for DAX");
			clear_opt(&sbi->opt, DAX_ALWAYS);
		}
	}

	sb->s_time_gran = 1;
	sb->s_xattr = erofs_xattr_handlers;
	sb->s_export_op = &erofs_export_ops;

	if (test_opt(&sbi->opt, POSIX_ACL))
		sb->s_flags |= SB_POSIXACL;
	else
		sb->s_flags &= ~SB_POSIXACL;

#ifdef CONFIG_EROFS_FS_ZIP
	xa_init(&sbi->managed_pslots);
#endif

	/* get the root inode */
	inode = erofs_iget(sb, ROOT_NID(sbi));
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (!S_ISDIR(inode->i_mode)) {
		erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
			  ROOT_NID(sbi), inode->i_mode);
		iput(inode);
		return -EINVAL;
	}

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;

	erofs_shrinker_register(sb);
	/* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
	if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
		sbi->packed_inode = erofs_iget(sb, sbi->packed_nid);
		if (IS_ERR(sbi->packed_inode)) {
			err = PTR_ERR(sbi->packed_inode);
			sbi->packed_inode = NULL;
			return err;
		}
	}
	err = erofs_init_managed_cache(sb);
	if (err)
		return err;

	err = erofs_xattr_prefixes_init(sb);
	if (err)
		return err;

	err = erofs_register_sysfs(sb);
	if (err)
		return err;

	erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi));
	return 0;
}

static int erofs_fc_anon_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, erofs_fc_fill_pseudo_super);
}

static int erofs_fc_get_tree(struct fs_context *fc)
{
	struct erofs_fs_context *ctx = fc->fs_private;

	if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->fsid)
		return get_tree_nodev(fc, erofs_fc_fill_super);

	return get_tree_bdev(fc, erofs_fc_fill_super);
}

static int erofs_fc_reconfigure(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_fs_context *ctx = fc->fs_private;

	DBG_BUGON(!sb_rdonly(sb));

	if (ctx->fsid || ctx->domain_id)
		erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");

	if (test_opt(&ctx->opt, POSIX_ACL))
		fc->sb_flags |= SB_POSIXACL;
	else
		fc->sb_flags &= ~SB_POSIXACL;

	sbi->opt = ctx->opt;

	fc->sb_flags |= SB_RDONLY;
	return 0;
}

static int erofs_release_device_info(int id, void *ptr, void *data)
{
	struct erofs_device_info *dif = ptr;

	fs_put_dax(dif->dax_dev, NULL);
	if (dif->bdev)
		blkdev_put(dif->bdev, &erofs_fs_type);
	erofs_fscache_unregister_cookie(dif->fscache);
	dif->fscache = NULL;
	kfree(dif->path);
	kfree(dif);
	return 0;
}

static void erofs_free_dev_context(struct erofs_dev_context *devs)
{
	if (!devs)
		return;
	idr_for_each(&devs->tree, &erofs_release_device_info, NULL);
	idr_destroy(&devs->tree);
	kfree(devs);
}

static void erofs_fc_free(struct fs_context *fc)
{
	struct erofs_fs_context *ctx = fc->fs_private;

	erofs_free_dev_context(ctx->devs);
	kfree(ctx->fsid);
	kfree(ctx->domain_id);
	kfree(ctx);
}

static const struct fs_context_operations erofs_context_ops = {
	.parse_param	= erofs_fc_parse_param,
	.get_tree       = erofs_fc_get_tree,
	.reconfigure    = erofs_fc_reconfigure,
	.free		= erofs_fc_free,
};

static const struct fs_context_operations erofs_anon_context_ops = {
	.get_tree       = erofs_fc_anon_get_tree,
};

static int erofs_init_fs_context(struct fs_context *fc)
{
	struct erofs_fs_context *ctx;

	/* pseudo mount for anon inodes */
	if (fc->sb_flags & SB_KERNMOUNT) {
		fc->ops = &erofs_anon_context_ops;
		return 0;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
	if (!ctx->devs) {
		kfree(ctx);
		return -ENOMEM;
	}
	fc->fs_private = ctx;

	idr_init(&ctx->devs->tree);
	init_rwsem(&ctx->devs->rwsem);
	erofs_default_options(ctx);
	fc->ops = &erofs_context_ops;
	return 0;
}

/*
 * This can be triggered after deactivate_locked_super() is called,
 * which covers both a normal umount and the teardown of a superblock
 * that failed to initialize.
 */
static void erofs_kill_sb(struct super_block *sb)
{
	struct erofs_sb_info *sbi;

	/* pseudo mount for anon inodes */
	if (sb->s_flags & SB_KERNMOUNT) {
		kill_anon_super(sb);
		return;
	}

	if (erofs_is_fscache_mode(sb))
		kill_anon_super(sb);
	else
		kill_block_super(sb);

	sbi = EROFS_SB(sb);
	if (!sbi)
		return;

	erofs_free_dev_context(sbi->devs);
	fs_put_dax(sbi->dax_dev, NULL);
	erofs_fscache_unregister_fs(sb);
	kfree(sbi->fsid);
	kfree(sbi->domain_id);
	kfree(sbi);
	sb->s_fs_info = NULL;
}

/* called when ->s_root is non-NULL */
static void erofs_put_super(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	DBG_BUGON(!sbi);

	erofs_unregister_sysfs(sb);
	erofs_shrinker_unregister(sb);
	erofs_xattr_prefixes_cleanup(sb);
#ifdef CONFIG_EROFS_FS_ZIP
	iput(sbi->managed_cache);
	sbi->managed_cache = NULL;
#endif
	iput(sbi->packed_inode);
	sbi->packed_inode = NULL;
	erofs_free_dev_context(sbi->devs);
	sbi->devs = NULL;
	erofs_fscache_unregister_fs(sb);
}

struct file_system_type erofs_fs_type = {
	.owner          = THIS_MODULE,
	.name           = "erofs",
	.init_fs_context = erofs_init_fs_context,
	.kill_sb        = erofs_kill_sb,
	.fs_flags       = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("erofs");

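/*
 * Note that erofs_module_exit() undoes these steps in reverse order,
 * while the labels below only unwind a partially completed init.
 */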
static int __init erofs_module_init(void)
{
	int err;

	erofs_check_ondisk_layout_definitions();

	erofs_inode_cachep = kmem_cache_create("erofs_inode",
					       sizeof(struct erofs_inode), 0,
					       SLAB_RECLAIM_ACCOUNT,
					       erofs_inode_init_once);
	if (!erofs_inode_cachep)
		return -ENOMEM;

	err = erofs_init_shrinker();
	if (err)
		goto shrinker_err;

	err = z_erofs_lzma_init();
	if (err)
		goto lzma_err;

	erofs_pcpubuf_init();
	err = z_erofs_init_zip_subsystem();
	if (err)
		goto zip_err;

	err = erofs_init_sysfs();
	if (err)
		goto sysfs_err;

	err = register_filesystem(&erofs_fs_type);
	if (err)
		goto fs_err;

	return 0;

fs_err:
	erofs_exit_sysfs();
sysfs_err:
	z_erofs_exit_zip_subsystem();
zip_err:
	z_erofs_lzma_exit();
lzma_err:
	erofs_exit_shrinker();
shrinker_err:
	kmem_cache_destroy(erofs_inode_cachep);
	return err;
}

static void __exit erofs_module_exit(void)
{
	unregister_filesystem(&erofs_fs_type);

	/* Ensure all RCU free inodes / pclusters are safe to be destroyed. */
	rcu_barrier();

	erofs_exit_sysfs();
	z_erofs_exit_zip_subsystem();
	z_erofs_lzma_exit();
	erofs_exit_shrinker();
	kmem_cache_destroy(erofs_inode_cachep);
	erofs_pcpubuf_exit();
}

/* get filesystem statistics */
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u64 id = 0;

	if (!erofs_is_fscache_mode(sb))
		id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = sb->s_magic;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = sbi->total_blocks;
	buf->f_bfree = buf->f_bavail = 0;

	buf->f_files = ULLONG_MAX;
	buf->f_ffree = ULLONG_MAX - sbi->inos;

	buf->f_namelen = EROFS_NAME_LEN;

	buf->f_fsid    = u64_to_fsid(id);
	return 0;
}

static int erofs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
	struct erofs_mount_opts *opt = &sbi->opt;

#ifdef CONFIG_EROFS_FS_XATTR
	if (test_opt(opt, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	if (test_opt(opt, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
#ifdef CONFIG_EROFS_FS_ZIP
	if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
		seq_puts(seq, ",cache_strategy=disabled");
	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
		seq_puts(seq, ",cache_strategy=readahead");
	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
		seq_puts(seq, ",cache_strategy=readaround");
#endif
	if (test_opt(opt, DAX_ALWAYS))
		seq_puts(seq, ",dax=always");
	if (test_opt(opt, DAX_NEVER))
		seq_puts(seq, ",dax=never");
#ifdef CONFIG_EROFS_FS_ONDEMAND
	if (sbi->fsid)
		seq_printf(seq, ",fsid=%s", sbi->fsid);
	if (sbi->domain_id)
		seq_printf(seq, ",domain_id=%s", sbi->domain_id);
#endif
	return 0;
}

const struct super_operations erofs_sops = {
	.put_super = erofs_put_super,
	.alloc_inode = erofs_alloc_inode,
	.free_inode = erofs_free_inode,
	.statfs = erofs_statfs,
	.show_options = erofs_show_options,
};

module_init(erofs_module_init);
module_exit(erofs_module_exit);

MODULE_DESCRIPTION("Enhanced ROM File System");
MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
MODULE_LICENSE("GPL");