1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2017-2018 HUAWEI, Inc.
4  *             https://www.huawei.com/
5  * Copyright (C) 2021, Alibaba Cloud
6  */
7 #include <linux/module.h>
8 #include <linux/statfs.h>
9 #include <linux/parser.h>
10 #include <linux/seq_file.h>
11 #include <linux/crc32c.h>
12 #include <linux/fs_context.h>
13 #include <linux/fs_parser.h>
14 #include <linux/dax.h>
15 #include <linux/exportfs.h>
16 #include "xattr.h"
17 
18 #define CREATE_TRACE_POINTS
19 #include <trace/events/erofs.h>
20 
21 static struct kmem_cache *erofs_inode_cachep __read_mostly;
22 
23 void _erofs_err(struct super_block *sb, const char *function,
24 		const char *fmt, ...)
25 {
26 	struct va_format vaf;
27 	va_list args;
28 
29 	va_start(args, fmt);
30 
31 	vaf.fmt = fmt;
32 	vaf.va = &args;
33 
34 	pr_err("(device %s): %s: %pV", sb->s_id, function, &vaf);
35 	va_end(args);
36 }
37 
38 void _erofs_info(struct super_block *sb, const char *function,
39 		 const char *fmt, ...)
40 {
41 	struct va_format vaf;
42 	va_list args;
43 
44 	va_start(args, fmt);
45 
46 	vaf.fmt = fmt;
47 	vaf.va = &args;
48 
49 	pr_info("(device %s): %pV", sb->s_id, &vaf);
50 	va_end(args);
51 }
52 
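/*
 * Verify the superblock checksum: the crc32c is computed over the on-disk
 * superblock area following EROFS_SUPER_OFFSET (up to one filesystem block),
 * with the stored checksum field treated as zero, and compared against the
 * value recorded on disk.
 */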
53 static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
54 {
55 	size_t len = 1 << EROFS_SB(sb)->blkszbits;
56 	struct erofs_super_block *dsb;
57 	u32 expected_crc, crc;
58 
59 	if (len > EROFS_SUPER_OFFSET)
60 		len -= EROFS_SUPER_OFFSET;
61 
62 	dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET, len, GFP_KERNEL);
63 	if (!dsb)
64 		return -ENOMEM;
65 
66 	expected_crc = le32_to_cpu(dsb->checksum);
67 	dsb->checksum = 0;
68 	/* compute crc32c over the superblock area with the checksum field zeroed */
69 	crc = crc32c(~0, dsb, len);
70 	kfree(dsb);
71 
72 	if (crc != expected_crc) {
73 		erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
74 			  crc, expected_crc);
75 		return -EBADMSG;
76 	}
77 	return 0;
78 }
79 
80 static void erofs_inode_init_once(void *ptr)
81 {
82 	struct erofs_inode *vi = ptr;
83 
84 	inode_init_once(&vi->vfs_inode);
85 }
86 
87 static struct inode *erofs_alloc_inode(struct super_block *sb)
88 {
89 	struct erofs_inode *vi =
90 		alloc_inode_sb(sb, erofs_inode_cachep, GFP_KERNEL);
91 
92 	if (!vi)
93 		return NULL;
94 
95 	/* zero out everything except vfs_inode */
96 	memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
97 	return &vi->vfs_inode;
98 }
99 
100 static void erofs_free_inode(struct inode *inode)
101 {
102 	struct erofs_inode *vi = EROFS_I(inode);
103 
104 	/* be careful of RCU symlink path */
105 	if (inode->i_op == &erofs_fast_symlink_iops)
106 		kfree(inode->i_link);
107 	kfree(vi->xattr_shared_xattrs);
108 
109 	kmem_cache_free(erofs_inode_cachep, vi);
110 }
111 
112 static bool check_layout_compatibility(struct super_block *sb,
113 				       struct erofs_super_block *dsb)
114 {
115 	const unsigned int feature = le32_to_cpu(dsb->feature_incompat);
116 
117 	EROFS_SB(sb)->feature_incompat = feature;
118 
119 	/* check if current kernel meets all mandatory requirements */
120 	if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
121 		erofs_err(sb,
122 			  "unidentified incompatible feature %x, please upgrade kernel version",
123 			   feature & ~EROFS_ALL_FEATURE_INCOMPAT);
124 		return false;
125 	}
126 	return true;
127 }
128 
129 /* read variable-sized metadata; the offset will be aligned to a 4-byte boundary */
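/*
 * On-disk format handled here: a __le16 byte count (0 means 65536) followed
 * by the payload itself, which may span multiple filesystem blocks.
 */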
130 void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
131 			  erofs_off_t *offset, int *lengthp)
132 {
133 	u8 *buffer, *ptr;
134 	int len, i, cnt;
135 
136 	*offset = round_up(*offset, 4);
137 	ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
138 	if (IS_ERR(ptr))
139 		return ptr;
140 
141 	len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(sb, *offset)]);
142 	if (!len)
143 		len = U16_MAX + 1;
144 	buffer = kmalloc(len, GFP_KERNEL);
145 	if (!buffer)
146 		return ERR_PTR(-ENOMEM);
147 	*offset += sizeof(__le16);
148 	*lengthp = len;
149 
150 	for (i = 0; i < len; i += cnt) {
151 		cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset),
152 			    len - i);
153 		ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
154 		if (IS_ERR(ptr)) {
155 			kfree(buffer);
156 			return ptr;
157 		}
158 		memcpy(buffer + i, ptr + erofs_blkoff(sb, *offset), cnt);
159 		*offset += cnt;
160 	}
161 	return buffer;
162 }
163 
164 #ifdef CONFIG_EROFS_FS_ZIP
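/*
 * Load the per-algorithm compression configurations: one variable-sized
 * record is stored for each algorithm bit set in available_compr_algs,
 * laid out back-to-back right after the (extended) superblock.
 */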
165 static int erofs_load_compr_cfgs(struct super_block *sb,
166 				 struct erofs_super_block *dsb)
167 {
168 	struct erofs_sb_info *sbi = EROFS_SB(sb);
169 	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
170 	unsigned int algs, alg;
171 	erofs_off_t offset;
172 	int size, ret = 0;
173 
174 	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
175 	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
176 		erofs_err(sb, "try to load compressed fs with unsupported algorithms %x",
177 			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
178 		return -EINVAL;
179 	}
180 
181 	erofs_init_metabuf(&buf, sb);
182 	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
183 	alg = 0;
184 	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
185 		void *data;
186 
187 		if (!(algs & 1))
188 			continue;
189 
190 		data = erofs_read_metadata(sb, &buf, &offset, &size);
191 		if (IS_ERR(data)) {
192 			ret = PTR_ERR(data);
193 			break;
194 		}
195 
196 		switch (alg) {
197 		case Z_EROFS_COMPRESSION_LZ4:
198 			ret = z_erofs_load_lz4_config(sb, dsb, data, size);
199 			break;
200 		case Z_EROFS_COMPRESSION_LZMA:
201 			ret = z_erofs_load_lzma_config(sb, dsb, data, size);
202 			break;
203 		default:
204 			DBG_BUGON(1);
205 			ret = -EFAULT;
206 		}
207 		kfree(data);
208 		if (ret)
209 			break;
210 	}
211 	erofs_put_metabuf(&buf);
212 	return ret;
213 }
214 #else
215 static int erofs_load_compr_cfgs(struct super_block *sb,
216 				 struct erofs_super_block *dsb)
217 {
218 	if (dsb->u1.available_compr_algs) {
219 		erofs_err(sb, "try to load compressed fs when compression is disabled");
220 		return -EINVAL;
221 	}
222 	return 0;
223 }
224 #endif
225 
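/*
 * Set up one extra device: parse its on-disk slot, take the device tag as
 * the path if none was supplied at mount time, then either register an
 * fscache cookie (fscache mode) or open the backing block device, and add
 * its blocks to the filesystem total.
 */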
226 static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
227 			     struct erofs_device_info *dif, erofs_off_t *pos)
228 {
229 	struct erofs_sb_info *sbi = EROFS_SB(sb);
230 	struct erofs_fscache *fscache;
231 	struct erofs_deviceslot *dis;
232 	struct block_device *bdev;
233 	void *ptr;
234 
235 	ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP);
236 	if (IS_ERR(ptr))
237 		return PTR_ERR(ptr);
238 	dis = ptr + erofs_blkoff(sb, *pos);
239 
240 	if (!dif->path) {
241 		if (!dis->tag[0]) {
242 			erofs_err(sb, "empty device tag @ pos %llu", *pos);
243 			return -EINVAL;
244 		}
245 		dif->path = kmemdup_nul(dis->tag, sizeof(dis->tag), GFP_KERNEL);
246 		if (!dif->path)
247 			return -ENOMEM;
248 	}
249 
250 	if (erofs_is_fscache_mode(sb)) {
251 		fscache = erofs_fscache_register_cookie(sb, dif->path, 0);
252 		if (IS_ERR(fscache))
253 			return PTR_ERR(fscache);
254 		dif->fscache = fscache;
255 	} else if (!sbi->devs->flatdev) {
256 		bdev = blkdev_get_by_path(dif->path, FMODE_READ | FMODE_EXCL,
257 					  sb->s_type);
258 		if (IS_ERR(bdev))
259 			return PTR_ERR(bdev);
260 		dif->bdev = bdev;
261 		dif->dax_dev = fs_dax_get_by_bdev(bdev, &dif->dax_part_off,
262 						  NULL, NULL);
263 	}
264 
265 	dif->blocks = le32_to_cpu(dis->blocks);
266 	dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
267 	sbi->total_blocks += dif->blocks;
268 	*pos += EROFS_DEVT_SLOT_SIZE;
269 	return 0;
270 }
271 
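/*
 * Walk the on-disk device table: if devices were given via "-o device=",
 * their count must match the table and the existing entries are initialized;
 * otherwise entries are allocated here, and (outside fscache mode) the extra
 * devices are assumed to live in the same flat backing image (flatdev).
 */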
272 static int erofs_scan_devices(struct super_block *sb,
273 			      struct erofs_super_block *dsb)
274 {
275 	struct erofs_sb_info *sbi = EROFS_SB(sb);
276 	unsigned int ondisk_extradevs;
277 	erofs_off_t pos;
278 	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
279 	struct erofs_device_info *dif;
280 	int id, err = 0;
281 
282 	sbi->total_blocks = sbi->primarydevice_blocks;
283 	if (!erofs_sb_has_device_table(sbi))
284 		ondisk_extradevs = 0;
285 	else
286 		ondisk_extradevs = le16_to_cpu(dsb->extra_devices);
287 
288 	if (sbi->devs->extra_devices &&
289 	    ondisk_extradevs != sbi->devs->extra_devices) {
290 		erofs_err(sb, "extra devices don't match (ondisk %u, given %u)",
291 			  ondisk_extradevs, sbi->devs->extra_devices);
292 		return -EINVAL;
293 	}
294 	if (!ondisk_extradevs)
295 		return 0;
296 
297 	if (!sbi->devs->extra_devices && !erofs_is_fscache_mode(sb))
298 		sbi->devs->flatdev = true;
299 
300 	sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1;
301 	pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE;
302 	down_read(&sbi->devs->rwsem);
303 	if (sbi->devs->extra_devices) {
304 		idr_for_each_entry(&sbi->devs->tree, dif, id) {
305 			err = erofs_init_device(&buf, sb, dif, &pos);
306 			if (err)
307 				break;
308 		}
309 	} else {
310 		for (id = 0; id < ondisk_extradevs; id++) {
311 			dif = kzalloc(sizeof(*dif), GFP_KERNEL);
312 			if (!dif) {
313 				err = -ENOMEM;
314 				break;
315 			}
316 
317 			err = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
318 			if (err < 0) {
319 				kfree(dif);
320 				break;
321 			}
322 			++sbi->devs->extra_devices;
323 
324 			err = erofs_init_device(&buf, sb, dif, &pos);
325 			if (err)
326 				break;
327 		}
328 	}
329 	up_read(&sbi->devs->rwsem);
330 	erofs_put_metabuf(&buf);
331 	return err;
332 }
333 
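/*
 * Read and sanity-check the on-disk superblock at block 0: validate the
 * magic number, block size bits and incompatible features, optionally verify
 * the checksum, copy the layout fields into erofs_sb_info, then load the
 * compression configurations and scan any extra devices.
 */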
334 static int erofs_read_superblock(struct super_block *sb)
335 {
336 	struct erofs_sb_info *sbi;
337 	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
338 	struct erofs_super_block *dsb;
339 	void *data;
340 	int ret;
341 
342 	data = erofs_read_metabuf(&buf, sb, 0, EROFS_KMAP);
343 	if (IS_ERR(data)) {
344 		erofs_err(sb, "cannot read erofs superblock");
345 		return PTR_ERR(data);
346 	}
347 
348 	sbi = EROFS_SB(sb);
349 	dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);
350 
351 	ret = -EINVAL;
352 	if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
353 		erofs_err(sb, "cannot find valid erofs superblock");
354 		goto out;
355 	}
356 
357 	sbi->blkszbits  = dsb->blkszbits;
358 	if (sbi->blkszbits < 9 || sbi->blkszbits > PAGE_SHIFT) {
359 		erofs_err(sb, "blkszbits %u isn't supported", sbi->blkszbits);
360 		goto out;
361 	}
362 	if (dsb->dirblkbits) {
363 		erofs_err(sb, "dirblkbits %u isn't supported", dsb->dirblkbits);
364 		goto out;
365 	}
366 
367 	sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
368 	if (erofs_sb_has_sb_chksum(sbi)) {
369 		ret = erofs_superblock_csum_verify(sb, data);
370 		if (ret)
371 			goto out;
372 	}
373 
374 	ret = -EINVAL;
375 	if (!check_layout_compatibility(sb, dsb))
376 		goto out;
377 
378 	sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
379 	if (sbi->sb_size > PAGE_SIZE - EROFS_SUPER_OFFSET) {
380 		erofs_err(sb, "invalid sb_size %u", sbi->sb_size);
382 		goto out;
383 	}
384 	sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
385 	sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
386 #ifdef CONFIG_EROFS_FS_XATTR
387 	sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
388 	sbi->xattr_prefix_start = le32_to_cpu(dsb->xattr_prefix_start);
389 	sbi->xattr_prefix_count = dsb->xattr_prefix_count;
390 #endif
391 	sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
392 	sbi->root_nid = le16_to_cpu(dsb->root_nid);
393 	sbi->packed_nid = le64_to_cpu(dsb->packed_nid);
394 	sbi->inos = le64_to_cpu(dsb->inos);
395 
396 	sbi->build_time = le64_to_cpu(dsb->build_time);
397 	sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);
398 
399 	memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));
400 
401 	ret = strscpy(sbi->volume_name, dsb->volume_name,
402 		      sizeof(dsb->volume_name));
403 	if (ret < 0) {	/* -E2BIG */
404 		erofs_err(sb, "bad volume name without NUL terminator");
405 		ret = -EFSCORRUPTED;
406 		goto out;
407 	}
408 
409 	/* parse on-disk compression configurations */
410 	if (erofs_sb_has_compr_cfgs(sbi))
411 		ret = erofs_load_compr_cfgs(sb, dsb);
412 	else
413 		ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0);
414 	if (ret < 0)
415 		goto out;
416 
417 	/* handle multiple devices */
418 	ret = erofs_scan_devices(sb, dsb);
419 
420 	if (erofs_is_fscache_mode(sb))
421 		erofs_info(sb, "EXPERIMENTAL fscache-based on-demand read feature in use. Use at your own risk!");
422 	if (erofs_sb_has_fragments(sbi))
423 		erofs_info(sb, "EXPERIMENTAL compressed fragments feature in use. Use at your own risk!");
424 	if (erofs_sb_has_dedupe(sbi))
425 		erofs_info(sb, "EXPERIMENTAL global deduplication feature in use. Use at your own risk!");
426 out:
427 	erofs_put_metabuf(&buf);
428 	return ret;
429 }
430 
431 /* set up default EROFS parameters */
432 static void erofs_default_options(struct erofs_fs_context *ctx)
433 {
434 #ifdef CONFIG_EROFS_FS_ZIP
435 	ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
436 	ctx->opt.max_sync_decompress_pages = 3;
437 	ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
438 #endif
439 #ifdef CONFIG_EROFS_FS_XATTR
440 	set_opt(&ctx->opt, XATTR_USER);
441 #endif
442 #ifdef CONFIG_EROFS_FS_POSIX_ACL
443 	set_opt(&ctx->opt, POSIX_ACL);
444 #endif
445 }
446 
447 enum {
448 	Opt_user_xattr,
449 	Opt_acl,
450 	Opt_cache_strategy,
451 	Opt_dax,
452 	Opt_dax_enum,
453 	Opt_device,
454 	Opt_fsid,
455 	Opt_domain_id,
456 	Opt_err
457 };
458 
459 static const struct constant_table erofs_param_cache_strategy[] = {
460 	{"disabled",	EROFS_ZIP_CACHE_DISABLED},
461 	{"readahead",	EROFS_ZIP_CACHE_READAHEAD},
462 	{"readaround",	EROFS_ZIP_CACHE_READAROUND},
463 	{}
464 };
465 
466 static const struct constant_table erofs_dax_param_enums[] = {
467 	{"always",	EROFS_MOUNT_DAX_ALWAYS},
468 	{"never",	EROFS_MOUNT_DAX_NEVER},
469 	{}
470 };
471 
472 static const struct fs_parameter_spec erofs_fs_parameters[] = {
473 	fsparam_flag_no("user_xattr",	Opt_user_xattr),
474 	fsparam_flag_no("acl",		Opt_acl),
475 	fsparam_enum("cache_strategy",	Opt_cache_strategy,
476 		     erofs_param_cache_strategy),
477 	fsparam_flag("dax",             Opt_dax),
478 	fsparam_enum("dax",		Opt_dax_enum, erofs_dax_param_enums),
479 	fsparam_string("device",	Opt_device),
480 	fsparam_string("fsid",		Opt_fsid),
481 	fsparam_string("domain_id",	Opt_domain_id),
482 	{}
483 };
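/*
 * Illustrative use of the parameters above (device names and mount point are
 * examples only):
 *   mount -t erofs -o noacl,cache_strategy=readahead,dax=never,device=/dev/vdb \
 *         /dev/vda /mnt/erofs
 * fsid= and domain_id= only apply to fscache-based (nodev) mounts.
 */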
484 
485 static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
486 {
487 #ifdef CONFIG_FS_DAX
488 	struct erofs_fs_context *ctx = fc->fs_private;
489 
490 	switch (mode) {
491 	case EROFS_MOUNT_DAX_ALWAYS:
492 		warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
493 		set_opt(&ctx->opt, DAX_ALWAYS);
494 		clear_opt(&ctx->opt, DAX_NEVER);
495 		return true;
496 	case EROFS_MOUNT_DAX_NEVER:
497 		set_opt(&ctx->opt, DAX_NEVER);
498 		clear_opt(&ctx->opt, DAX_ALWAYS);
499 		return true;
500 	default:
501 		DBG_BUGON(1);
502 		return false;
503 	}
504 #else
505 	errorfc(fc, "dax options not supported");
506 	return false;
507 #endif
508 }
509 
510 static int erofs_fc_parse_param(struct fs_context *fc,
511 				struct fs_parameter *param)
512 {
513 	struct erofs_fs_context *ctx = fc->fs_private;
514 	struct fs_parse_result result;
515 	struct erofs_device_info *dif;
516 	int opt, ret;
517 
518 	opt = fs_parse(fc, erofs_fs_parameters, param, &result);
519 	if (opt < 0)
520 		return opt;
521 
522 	switch (opt) {
523 	case Opt_user_xattr:
524 #ifdef CONFIG_EROFS_FS_XATTR
525 		if (result.boolean)
526 			set_opt(&ctx->opt, XATTR_USER);
527 		else
528 			clear_opt(&ctx->opt, XATTR_USER);
529 #else
530 		errorfc(fc, "{,no}user_xattr options not supported");
531 #endif
532 		break;
533 	case Opt_acl:
534 #ifdef CONFIG_EROFS_FS_POSIX_ACL
535 		if (result.boolean)
536 			set_opt(&ctx->opt, POSIX_ACL);
537 		else
538 			clear_opt(&ctx->opt, POSIX_ACL);
539 #else
540 		errorfc(fc, "{,no}acl options not supported");
541 #endif
542 		break;
543 	case Opt_cache_strategy:
544 #ifdef CONFIG_EROFS_FS_ZIP
545 		ctx->opt.cache_strategy = result.uint_32;
546 #else
547 		errorfc(fc, "compression not supported, cache_strategy ignored");
548 #endif
549 		break;
550 	case Opt_dax:
551 		if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS))
552 			return -EINVAL;
553 		break;
554 	case Opt_dax_enum:
555 		if (!erofs_fc_set_dax_mode(fc, result.uint_32))
556 			return -EINVAL;
557 		break;
558 	case Opt_device:
559 		dif = kzalloc(sizeof(*dif), GFP_KERNEL);
560 		if (!dif)
561 			return -ENOMEM;
562 		dif->path = kstrdup(param->string, GFP_KERNEL);
563 		if (!dif->path) {
564 			kfree(dif);
565 			return -ENOMEM;
566 		}
567 		down_write(&ctx->devs->rwsem);
568 		ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
569 		up_write(&ctx->devs->rwsem);
570 		if (ret < 0) {
571 			kfree(dif->path);
572 			kfree(dif);
573 			return ret;
574 		}
575 		++ctx->devs->extra_devices;
576 		break;
577 #ifdef CONFIG_EROFS_FS_ONDEMAND
578 	case Opt_fsid:
579 		kfree(ctx->fsid);
580 		ctx->fsid = kstrdup(param->string, GFP_KERNEL);
581 		if (!ctx->fsid)
582 			return -ENOMEM;
583 		break;
584 	case Opt_domain_id:
585 		kfree(ctx->domain_id);
586 		ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
587 		if (!ctx->domain_id)
588 			return -ENOMEM;
589 		break;
590 #else
591 	case Opt_fsid:
592 	case Opt_domain_id:
593 		errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
594 		break;
595 #endif
596 	default:
597 		return -ENOPARAM;
598 	}
599 	return 0;
600 }
601 
602 #ifdef CONFIG_EROFS_FS_ZIP
603 static const struct address_space_operations managed_cache_aops;
604 
605 static bool erofs_managed_cache_release_folio(struct folio *folio, gfp_t gfp)
606 {
607 	bool ret = true;
608 	struct address_space *const mapping = folio->mapping;
609 
610 	DBG_BUGON(!folio_test_locked(folio));
611 	DBG_BUGON(mapping->a_ops != &managed_cache_aops);
612 
613 	if (folio_test_private(folio))
614 		ret = erofs_try_to_free_cached_page(&folio->page);
615 
616 	return ret;
617 }
618 
619 /*
620  * This is called only on inode eviction. If there are still decompression
621  * requests in flight, wait here (rescheduling as needed) until they finish;
622  * introducing extra locking instead seems unnecessary.
623  */
624 static void erofs_managed_cache_invalidate_folio(struct folio *folio,
625 					       size_t offset, size_t length)
626 {
627 	const size_t stop = length + offset;
628 
629 	DBG_BUGON(!folio_test_locked(folio));
630 
631 	/* Check for potential overflow in debug mode */
632 	DBG_BUGON(stop > folio_size(folio) || stop < length);
633 
634 	if (offset == 0 && stop == folio_size(folio))
635 		while (!erofs_managed_cache_release_folio(folio, GFP_NOFS))
636 			cond_resched();
637 }
638 
639 static const struct address_space_operations managed_cache_aops = {
640 	.release_folio = erofs_managed_cache_release_folio,
641 	.invalidate_folio = erofs_managed_cache_invalidate_folio,
642 };
643 
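/*
 * The managed cache is an in-memory pseudo inode whose page cache holds
 * cached compressed data; the address_space operations above let the VM
 * reclaim or invalidate those pages when needed.
 */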
644 static int erofs_init_managed_cache(struct super_block *sb)
645 {
646 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
647 	struct inode *const inode = new_inode(sb);
648 
649 	if (!inode)
650 		return -ENOMEM;
651 
652 	set_nlink(inode, 1);
653 	inode->i_size = OFFSET_MAX;
654 
655 	inode->i_mapping->a_ops = &managed_cache_aops;
656 	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
657 	sbi->managed_cache = inode;
658 	return 0;
659 }
660 #else
661 static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
662 #endif
663 
664 static struct inode *erofs_nfs_get_inode(struct super_block *sb,
665 					 u64 ino, u32 generation)
666 {
667 	return erofs_iget(sb, ino);
668 }
669 
670 static struct dentry *erofs_fh_to_dentry(struct super_block *sb,
671 		struct fid *fid, int fh_len, int fh_type)
672 {
673 	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
674 				    erofs_nfs_get_inode);
675 }
676 
677 static struct dentry *erofs_fh_to_parent(struct super_block *sb,
678 		struct fid *fid, int fh_len, int fh_type)
679 {
680 	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
681 				    erofs_nfs_get_inode);
682 }
683 
684 static struct dentry *erofs_get_parent(struct dentry *child)
685 {
686 	erofs_nid_t nid;
687 	unsigned int d_type;
688 	int err;
689 
690 	err = erofs_namei(d_inode(child), &dotdot_name, &nid, &d_type);
691 	if (err)
692 		return ERR_PTR(err);
693 	return d_obtain_alias(erofs_iget(child->d_sb, nid));
694 }
695 
696 static const struct export_operations erofs_export_ops = {
697 	.fh_to_dentry = erofs_fh_to_dentry,
698 	.fh_to_parent = erofs_fh_to_parent,
699 	.get_parent = erofs_get_parent,
700 };
701 
702 static int erofs_fc_fill_pseudo_super(struct super_block *sb, struct fs_context *fc)
703 {
704 	static const struct tree_descr empty_descr = {""};
705 
706 	return simple_fill_super(sb, EROFS_SUPER_MAGIC, &empty_descr);
707 }
708 
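/*
 * Fill a new superblock: allocate erofs_sb_info, take over the parsed mount
 * context, set the block size (PAGE_SIZE for fscache mode, otherwise from the
 * on-disk superblock), read the superblock, then set up the root inode, the
 * optional packed inode, the managed cache, xattr prefixes and sysfs entries.
 */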
709 static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
710 {
711 	struct inode *inode;
712 	struct erofs_sb_info *sbi;
713 	struct erofs_fs_context *ctx = fc->fs_private;
714 	int err;
715 
716 	sb->s_magic = EROFS_SUPER_MAGIC;
717 	sb->s_flags |= SB_RDONLY | SB_NOATIME;
718 	sb->s_maxbytes = MAX_LFS_FILESIZE;
719 	sb->s_op = &erofs_sops;
720 
721 	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
722 	if (!sbi)
723 		return -ENOMEM;
724 
725 	sb->s_fs_info = sbi;
726 	sbi->opt = ctx->opt;
727 	sbi->devs = ctx->devs;
728 	ctx->devs = NULL;
729 	sbi->fsid = ctx->fsid;
730 	ctx->fsid = NULL;
731 	sbi->domain_id = ctx->domain_id;
732 	ctx->domain_id = NULL;
733 
734 	sbi->blkszbits = PAGE_SHIFT;
735 	if (erofs_is_fscache_mode(sb)) {
736 		sb->s_blocksize = PAGE_SIZE;
737 		sb->s_blocksize_bits = PAGE_SHIFT;
738 
739 		err = erofs_fscache_register_fs(sb);
740 		if (err)
741 			return err;
742 
743 		err = super_setup_bdi(sb);
744 		if (err)
745 			return err;
746 	} else {
747 		if (!sb_set_blocksize(sb, PAGE_SIZE)) {
748 			errorfc(fc, "failed to set initial blksize");
749 			return -EINVAL;
750 		}
751 
752 		sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
753 						  &sbi->dax_part_off,
754 						  NULL, NULL);
755 	}
756 
757 	err = erofs_read_superblock(sb);
758 	if (err)
759 		return err;
760 
761 	if (sb->s_blocksize_bits != sbi->blkszbits) {
762 		if (erofs_is_fscache_mode(sb)) {
763 			errorfc(fc, "unsupported blksize for fscache mode");
764 			return -EINVAL;
765 		}
766 		if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) {
767 			errorfc(fc, "failed to set erofs blksize");
768 			return -EINVAL;
769 		}
770 	}
771 
772 	if (test_opt(&sbi->opt, DAX_ALWAYS)) {
773 		if (!sbi->dax_dev) {
774 			errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
775 			clear_opt(&sbi->opt, DAX_ALWAYS);
776 		} else if (sbi->blkszbits != PAGE_SHIFT) {
777 			errorfc(fc, "unsupported blocksize for DAX");
778 			clear_opt(&sbi->opt, DAX_ALWAYS);
779 		}
780 	}
781 
782 	sb->s_time_gran = 1;
783 	sb->s_xattr = erofs_xattr_handlers;
784 	sb->s_export_op = &erofs_export_ops;
785 
786 	if (test_opt(&sbi->opt, POSIX_ACL))
787 		sb->s_flags |= SB_POSIXACL;
788 	else
789 		sb->s_flags &= ~SB_POSIXACL;
790 
791 #ifdef CONFIG_EROFS_FS_ZIP
792 	xa_init(&sbi->managed_pslots);
793 #endif
794 
795 	/* get the root inode */
796 	inode = erofs_iget(sb, ROOT_NID(sbi));
797 	if (IS_ERR(inode))
798 		return PTR_ERR(inode);
799 
800 	if (!S_ISDIR(inode->i_mode)) {
801 		erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
802 			  ROOT_NID(sbi), inode->i_mode);
803 		iput(inode);
804 		return -EINVAL;
805 	}
806 
807 	sb->s_root = d_make_root(inode);
808 	if (!sb->s_root)
809 		return -ENOMEM;
810 
811 	erofs_shrinker_register(sb);
812 	/* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
813 	if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
814 		sbi->packed_inode = erofs_iget(sb, sbi->packed_nid);
815 		if (IS_ERR(sbi->packed_inode)) {
816 			err = PTR_ERR(sbi->packed_inode);
817 			sbi->packed_inode = NULL;
818 			return err;
819 		}
820 	}
821 	err = erofs_init_managed_cache(sb);
822 	if (err)
823 		return err;
824 
825 	err = erofs_xattr_prefixes_init(sb);
826 	if (err)
827 		return err;
828 
829 	err = erofs_register_sysfs(sb);
830 	if (err)
831 		return err;
832 
833 	erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi));
834 	return 0;
835 }
836 
837 static int erofs_fc_anon_get_tree(struct fs_context *fc)
838 {
839 	return get_tree_nodev(fc, erofs_fc_fill_pseudo_super);
840 }
841 
842 static int erofs_fc_get_tree(struct fs_context *fc)
843 {
844 	struct erofs_fs_context *ctx = fc->fs_private;
845 
846 	if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->fsid)
847 		return get_tree_nodev(fc, erofs_fc_fill_super);
848 
849 	return get_tree_bdev(fc, erofs_fc_fill_super);
850 }
851 
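/*
 * Remount is limited: EROFS is read-only, so only the mount options are
 * updated and SB_RDONLY is always kept; fsid/domain_id changes are ignored.
 */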
852 static int erofs_fc_reconfigure(struct fs_context *fc)
853 {
854 	struct super_block *sb = fc->root->d_sb;
855 	struct erofs_sb_info *sbi = EROFS_SB(sb);
856 	struct erofs_fs_context *ctx = fc->fs_private;
857 
858 	DBG_BUGON(!sb_rdonly(sb));
859 
860 	if (ctx->fsid || ctx->domain_id)
861 		erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");
862 
863 	if (test_opt(&ctx->opt, POSIX_ACL))
864 		fc->sb_flags |= SB_POSIXACL;
865 	else
866 		fc->sb_flags &= ~SB_POSIXACL;
867 
868 	sbi->opt = ctx->opt;
869 
870 	fc->sb_flags |= SB_RDONLY;
871 	return 0;
872 }
873 
874 static int erofs_release_device_info(int id, void *ptr, void *data)
875 {
876 	struct erofs_device_info *dif = ptr;
877 
878 	fs_put_dax(dif->dax_dev, NULL);
879 	if (dif->bdev)
880 		blkdev_put(dif->bdev, FMODE_READ | FMODE_EXCL);
881 	erofs_fscache_unregister_cookie(dif->fscache);
882 	dif->fscache = NULL;
883 	kfree(dif->path);
884 	kfree(dif);
885 	return 0;
886 }
887 
888 static void erofs_free_dev_context(struct erofs_dev_context *devs)
889 {
890 	if (!devs)
891 		return;
892 	idr_for_each(&devs->tree, &erofs_release_device_info, NULL);
893 	idr_destroy(&devs->tree);
894 	kfree(devs);
895 }
896 
897 static void erofs_fc_free(struct fs_context *fc)
898 {
899 	struct erofs_fs_context *ctx = fc->fs_private;
900 
901 	erofs_free_dev_context(ctx->devs);
902 	kfree(ctx->fsid);
903 	kfree(ctx->domain_id);
904 	kfree(ctx);
905 }
906 
907 static const struct fs_context_operations erofs_context_ops = {
908 	.parse_param	= erofs_fc_parse_param,
909 	.get_tree       = erofs_fc_get_tree,
910 	.reconfigure    = erofs_fc_reconfigure,
911 	.free		= erofs_fc_free,
912 };
913 
914 static const struct fs_context_operations erofs_anon_context_ops = {
915 	.get_tree       = erofs_fc_anon_get_tree,
916 };
917 
918 static int erofs_init_fs_context(struct fs_context *fc)
919 {
920 	struct erofs_fs_context *ctx;
921 
922 	/* pseudo mount for anon inodes */
923 	if (fc->sb_flags & SB_KERNMOUNT) {
924 		fc->ops = &erofs_anon_context_ops;
925 		return 0;
926 	}
927 
928 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
929 	if (!ctx)
930 		return -ENOMEM;
931 	ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
932 	if (!ctx->devs) {
933 		kfree(ctx);
934 		return -ENOMEM;
935 	}
936 	fc->fs_private = ctx;
937 
938 	idr_init(&ctx->devs->tree);
939 	init_rwsem(&ctx->devs->rwsem);
940 	erofs_default_options(ctx);
941 	fc->ops = &erofs_context_ops;
942 	return 0;
943 }
944 
945 /*
946  * This can be triggered after deactivate_locked_super() is called,
947  * which covers both umount and failure during initialization.
948  */
949 static void erofs_kill_sb(struct super_block *sb)
950 {
951 	struct erofs_sb_info *sbi;
952 
953 	WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);
954 
955 	/* pseudo mount for anon inodes */
956 	if (sb->s_flags & SB_KERNMOUNT) {
957 		kill_anon_super(sb);
958 		return;
959 	}
960 
961 	if (erofs_is_fscache_mode(sb))
962 		kill_anon_super(sb);
963 	else
964 		kill_block_super(sb);
965 
966 	sbi = EROFS_SB(sb);
967 	if (!sbi)
968 		return;
969 
970 	erofs_free_dev_context(sbi->devs);
971 	fs_put_dax(sbi->dax_dev, NULL);
972 	erofs_fscache_unregister_fs(sb);
973 	kfree(sbi->fsid);
974 	kfree(sbi->domain_id);
975 	kfree(sbi);
976 	sb->s_fs_info = NULL;
977 }
978 
979 /* called when ->s_root is non-NULL */
980 static void erofs_put_super(struct super_block *sb)
981 {
982 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
983 
984 	DBG_BUGON(!sbi);
985 
986 	erofs_unregister_sysfs(sb);
987 	erofs_shrinker_unregister(sb);
988 	erofs_xattr_prefixes_cleanup(sb);
989 #ifdef CONFIG_EROFS_FS_ZIP
990 	iput(sbi->managed_cache);
991 	sbi->managed_cache = NULL;
992 #endif
993 	iput(sbi->packed_inode);
994 	sbi->packed_inode = NULL;
995 	erofs_free_dev_context(sbi->devs);
996 	sbi->devs = NULL;
997 	erofs_fscache_unregister_fs(sb);
998 }
999 
1000 struct file_system_type erofs_fs_type = {
1001 	.owner          = THIS_MODULE,
1002 	.name           = "erofs",
1003 	.init_fs_context = erofs_init_fs_context,
1004 	.kill_sb        = erofs_kill_sb,
1005 	.fs_flags       = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
1006 };
1007 MODULE_ALIAS_FS("erofs");
1008 
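/*
 * Module init: set up the inode cache, shrinker, LZMA support, per-CPU
 * buffers, the zip subsystem and sysfs before registering the filesystem;
 * the error labels below unwind the steps that need explicit cleanup in
 * reverse order.
 */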
1009 static int __init erofs_module_init(void)
1010 {
1011 	int err;
1012 
1013 	erofs_check_ondisk_layout_definitions();
1014 
1015 	erofs_inode_cachep = kmem_cache_create("erofs_inode",
1016 					       sizeof(struct erofs_inode), 0,
1017 					       SLAB_RECLAIM_ACCOUNT,
1018 					       erofs_inode_init_once);
1019 	if (!erofs_inode_cachep) {
1020 		err = -ENOMEM;
1021 		goto icache_err;
1022 	}
1023 
1024 	err = erofs_init_shrinker();
1025 	if (err)
1026 		goto shrinker_err;
1027 
1028 	err = z_erofs_lzma_init();
1029 	if (err)
1030 		goto lzma_err;
1031 
1032 	erofs_pcpubuf_init();
1033 	err = z_erofs_init_zip_subsystem();
1034 	if (err)
1035 		goto zip_err;
1036 
1037 	err = erofs_init_sysfs();
1038 	if (err)
1039 		goto sysfs_err;
1040 
1041 	err = register_filesystem(&erofs_fs_type);
1042 	if (err)
1043 		goto fs_err;
1044 
1045 	return 0;
1046 
1047 fs_err:
1048 	erofs_exit_sysfs();
1049 sysfs_err:
1050 	z_erofs_exit_zip_subsystem();
1051 zip_err:
1052 	z_erofs_lzma_exit();
1053 lzma_err:
1054 	erofs_exit_shrinker();
1055 shrinker_err:
1056 	kmem_cache_destroy(erofs_inode_cachep);
1057 icache_err:
1058 	return err;
1059 }
1060 
1061 static void __exit erofs_module_exit(void)
1062 {
1063 	unregister_filesystem(&erofs_fs_type);
1064 
1065 	/* Ensure all RCU-freed inodes / pclusters are safe to be destroyed. */
1066 	rcu_barrier();
1067 
1068 	erofs_exit_sysfs();
1069 	z_erofs_exit_zip_subsystem();
1070 	z_erofs_lzma_exit();
1071 	erofs_exit_shrinker();
1072 	kmem_cache_destroy(erofs_inode_cachep);
1073 	erofs_pcpubuf_exit();
1074 }
1075 
1076 /* get filesystem statistics */
1077 static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
1078 {
1079 	struct super_block *sb = dentry->d_sb;
1080 	struct erofs_sb_info *sbi = EROFS_SB(sb);
1081 	u64 id = 0;
1082 
1083 	if (!erofs_is_fscache_mode(sb))
1084 		id = huge_encode_dev(sb->s_bdev->bd_dev);
1085 
1086 	buf->f_type = sb->s_magic;
1087 	buf->f_bsize = sb->s_blocksize;
1088 	buf->f_blocks = sbi->total_blocks;
1089 	buf->f_bfree = buf->f_bavail = 0;
1090 
1091 	buf->f_files = ULLONG_MAX;
1092 	buf->f_ffree = ULLONG_MAX - sbi->inos;
1093 
1094 	buf->f_namelen = EROFS_NAME_LEN;
1095 
1096 	buf->f_fsid    = u64_to_fsid(id);
1097 	return 0;
1098 }
1099 
1100 static int erofs_show_options(struct seq_file *seq, struct dentry *root)
1101 {
1102 	struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
1103 	struct erofs_mount_opts *opt = &sbi->opt;
1104 
1105 #ifdef CONFIG_EROFS_FS_XATTR
1106 	if (test_opt(opt, XATTR_USER))
1107 		seq_puts(seq, ",user_xattr");
1108 	else
1109 		seq_puts(seq, ",nouser_xattr");
1110 #endif
1111 #ifdef CONFIG_EROFS_FS_POSIX_ACL
1112 	if (test_opt(opt, POSIX_ACL))
1113 		seq_puts(seq, ",acl");
1114 	else
1115 		seq_puts(seq, ",noacl");
1116 #endif
1117 #ifdef CONFIG_EROFS_FS_ZIP
1118 	if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
1119 		seq_puts(seq, ",cache_strategy=disabled");
1120 	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
1121 		seq_puts(seq, ",cache_strategy=readahead");
1122 	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
1123 		seq_puts(seq, ",cache_strategy=readaround");
1124 #endif
1125 	if (test_opt(opt, DAX_ALWAYS))
1126 		seq_puts(seq, ",dax=always");
1127 	if (test_opt(opt, DAX_NEVER))
1128 		seq_puts(seq, ",dax=never");
1129 #ifdef CONFIG_EROFS_FS_ONDEMAND
1130 	if (sbi->fsid)
1131 		seq_printf(seq, ",fsid=%s", sbi->fsid);
1132 	if (sbi->domain_id)
1133 		seq_printf(seq, ",domain_id=%s", sbi->domain_id);
1134 #endif
1135 	return 0;
1136 }
1137 
1138 const struct super_operations erofs_sops = {
1139 	.put_super = erofs_put_super,
1140 	.alloc_inode = erofs_alloc_inode,
1141 	.free_inode = erofs_free_inode,
1142 	.statfs = erofs_statfs,
1143 	.show_options = erofs_show_options,
1144 };
1145 
1146 module_init(erofs_module_init);
1147 module_exit(erofs_module_exit);
1148 
1149 MODULE_DESCRIPTION("Enhanced ROM File System");
1150 MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
1151 MODULE_LICENSE("GPL");
1152