/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#ifndef __EROFS_INTERNAL_H
#define __EROFS_INTERNAL_H

#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/dcache.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iomap.h>
#include "erofs_fs.h"

__printf(2, 3) void _erofs_printk(struct super_block *sb, const char *fmt, ...);
#define erofs_err(sb, fmt, ...)	\
	_erofs_printk(sb, KERN_ERR fmt "\n", ##__VA_ARGS__)
#define erofs_info(sb, fmt, ...) \
	_erofs_printk(sb, KERN_INFO fmt "\n", ##__VA_ARGS__)

#ifdef CONFIG_EROFS_FS_DEBUG
#define DBG_BUGON		BUG_ON
#else
#define DBG_BUGON(x)		((void)(x))
#endif	/* !CONFIG_EROFS_FS_DEBUG */

/* EROFS_SUPER_MAGIC_V1 represents the whole filesystem */
#define EROFS_SUPER_MAGIC	EROFS_SUPER_MAGIC_V1

typedef u64 erofs_nid_t;
typedef u64 erofs_off_t;
/* data type for filesystem-wide block numbers */
typedef u32 erofs_blk_t;

struct erofs_device_info {
	char *path;
	struct erofs_fscache *fscache;
	struct file *file;
	struct dax_device *dax_dev;
	u64 dax_part_off;

	u32 blocks;
	u32 mapped_blkaddr;
};

enum {
	EROFS_SYNC_DECOMPRESS_AUTO,
	EROFS_SYNC_DECOMPRESS_FORCE_ON,
	EROFS_SYNC_DECOMPRESS_FORCE_OFF
};

struct erofs_mount_opts {
	/* current strategy of how to use managed cache */
	unsigned char cache_strategy;
	/* strategy of sync decompression (0 - auto, 1 - force on, 2 - force off) */
	unsigned int sync_decompress;
	/* threshold (in pages) for synchronous decompression */
	unsigned int max_sync_decompress_pages;
	unsigned int mount_opt;
};

struct erofs_dev_context {
	struct idr tree;
	struct rw_semaphore rwsem;

	unsigned int extra_devices;
	bool flatdev;
};

/* all filesystem-wide lz4 configurations */
struct erofs_sb_lz4_info {
	/* # of pages needed for EROFS lz4 rolling decompression */
	u16 max_distance_pages;
	/* maximum possible blocks for pclusters in the filesystem */
	u16 max_pclusterblks;
};

struct erofs_domain {
	refcount_t ref;
	struct list_head list;
	struct fscache_volume *volume;
	char *domain_id;
};

struct erofs_fscache {
	struct fscache_cookie *cookie;
	struct inode *inode;	/* anonymous inode for the blob */

	/* used for share domain mode */
	struct erofs_domain *domain;
	struct list_head node;
	refcount_t ref;
	char *name;
};

struct erofs_xattr_prefix_item {
	struct erofs_xattr_long_prefix *prefix;
	u8 infix_len;
};

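/*
 * In-memory superblock information, one instance per mounted EROFS image;
 * reachable from a VFS superblock via the EROFS_SB() helper below.
 */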
struct erofs_sb_info {
	struct erofs_device_info dif0;
	struct erofs_mount_opts opt;	/* options */
#ifdef CONFIG_EROFS_FS_ZIP
	/* list for all registered superblocks, mainly for shrinker */
	struct list_head list;
	struct mutex umount_mutex;

	/* managed XArray arranged in physical block number */
	struct xarray managed_pslots;

	unsigned int shrinker_run_no;
	u16 available_compr_algs;

	/* pseudo inode to manage cached pages */
	struct inode *managed_cache;

	struct erofs_sb_lz4_info lz4;
#endif	/* CONFIG_EROFS_FS_ZIP */
	struct inode *packed_inode;
	struct erofs_dev_context *devs;
	u64 total_blocks;

	u32 meta_blkaddr;
#ifdef CONFIG_EROFS_FS_XATTR
	u32 xattr_blkaddr;
	u32 xattr_prefix_start;
	u8 xattr_prefix_count;
	struct erofs_xattr_prefix_item *xattr_prefixes;
	unsigned int xattr_filter_reserved;
#endif
	u16 device_id_mask;	/* valid bits of device id to be used */

	unsigned char islotbits;	/* inode slot unit size in bit shift */
	unsigned char blkszbits;	/* filesystem block size in bit shift */

	u32 sb_size;		/* total superblock size */
	u32 build_time_nsec;
	u64 build_time;

	/* what we really care about is nid rather than ino.. */
	erofs_nid_t root_nid;
	erofs_nid_t packed_nid;
	/* used for statfs, f_files - f_favail */
	u64 inos;

	u8 uuid[16];		/* 128-bit uuid for volume */
	u8 volume_name[16];	/* volume name */
	u32 feature_compat;
	u32 feature_incompat;

	/* sysfs support */
	struct kobject s_kobj;	/* /sys/fs/erofs/<devname> */
	struct completion s_kobj_unregister;

	/* fscache support */
	struct fscache_volume *volume;
	struct erofs_domain *domain;
	char *fsid;
	char *domain_id;
};

#define EROFS_SB(sb)		((struct erofs_sb_info *)(sb)->s_fs_info)
#define EROFS_I_SB(inode)	((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)

/* Mount flags set via mount options or defaults */
#define EROFS_MOUNT_XATTR_USER		0x00000010
#define EROFS_MOUNT_POSIX_ACL		0x00000020
#define EROFS_MOUNT_DAX_ALWAYS		0x00000040
#define EROFS_MOUNT_DAX_NEVER		0x00000080
#define EROFS_MOUNT_DIRECT_IO		0x00000100

#define clear_opt(opt, option)	((opt)->mount_opt &= ~EROFS_MOUNT_##option)
#define set_opt(opt, option)	((opt)->mount_opt |= EROFS_MOUNT_##option)
#define test_opt(opt, option)	((opt)->mount_opt & EROFS_MOUNT_##option)

static inline bool erofs_is_fileio_mode(struct erofs_sb_info *sbi)
{
	return IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) && sbi->dif0.file;
}

static inline bool erofs_is_fscache_mode(struct super_block *sb)
{
	return IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) &&
			!erofs_is_fileio_mode(EROFS_SB(sb)) && !sb->s_bdev;
}

enum {
	EROFS_ZIP_CACHE_DISABLED,
	EROFS_ZIP_CACHE_READAHEAD,
	EROFS_ZIP_CACHE_READAROUND
};

enum erofs_kmap_type {
	EROFS_NO_KMAP,	/* don't map the buffer */
	EROFS_KMAP,	/* use kmap_local_page() to map the buffer */
};

struct erofs_buf {
	struct address_space *mapping;
	struct file *file;
	struct page *page;
	void *base;
};
#define __EROFS_BUF_INITIALIZER	((struct erofs_buf){ .page = NULL })

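/*
 * Block/byte address conversion helpers.  Illustrative values (not taken from
 * the source): with 4KiB blocks (s_blocksize_bits == 12),
 * erofs_blknr(sb, 0x3010) is 3, erofs_blkoff(sb, 0x3010) is 0x10 and
 * erofs_pos(sb, 3) is 0x3000.
 */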
#define erofs_blknr(sb, addr)	((erofs_blk_t)((addr) >> (sb)->s_blocksize_bits))
#define erofs_blkoff(sb, addr)	((addr) & ((sb)->s_blocksize - 1))
#define erofs_pos(sb, blk)	((erofs_off_t)(blk) << (sb)->s_blocksize_bits)
#define erofs_iblks(i)	(round_up((i)->i_size, i_blocksize(i)) >> (i)->i_blkbits)

#define EROFS_FEATURE_FUNCS(name, compat, feature) \
static inline bool erofs_sb_has_##name(struct erofs_sb_info *sbi) \
{ \
	return sbi->feature_##compat & EROFS_FEATURE_##feature; \
}

EROFS_FEATURE_FUNCS(zero_padding, incompat, INCOMPAT_ZERO_PADDING)
EROFS_FEATURE_FUNCS(compr_cfgs, incompat, INCOMPAT_COMPR_CFGS)
EROFS_FEATURE_FUNCS(big_pcluster, incompat, INCOMPAT_BIG_PCLUSTER)
EROFS_FEATURE_FUNCS(chunked_file, incompat, INCOMPAT_CHUNKED_FILE)
EROFS_FEATURE_FUNCS(device_table, incompat, INCOMPAT_DEVICE_TABLE)
EROFS_FEATURE_FUNCS(compr_head2, incompat, INCOMPAT_COMPR_HEAD2)
EROFS_FEATURE_FUNCS(ztailpacking, incompat, INCOMPAT_ZTAILPACKING)
EROFS_FEATURE_FUNCS(fragments, incompat, INCOMPAT_FRAGMENTS)
EROFS_FEATURE_FUNCS(dedupe, incompat, INCOMPAT_DEDUPE)
EROFS_FEATURE_FUNCS(xattr_prefixes, incompat, INCOMPAT_XATTR_PREFIXES)
EROFS_FEATURE_FUNCS(sb_chksum, compat, COMPAT_SB_CHKSUM)
EROFS_FEATURE_FUNCS(xattr_filter, compat, COMPAT_XATTR_FILTER)

/* atomic flag definitions */
#define EROFS_I_EA_INITED_BIT	0
#define EROFS_I_Z_INITED_BIT	1

/* bitlock definitions (arranged in reverse order) */
#define EROFS_I_BL_XATTR_BIT	(BITS_PER_LONG - 1)
#define EROFS_I_BL_Z_BIT	(BITS_PER_LONG - 2)

struct erofs_inode {
	erofs_nid_t nid;

	/* atomic flags (including bitlocks) */
	unsigned long flags;

	unsigned char datalayout;
	unsigned char inode_isize;
	unsigned int xattr_isize;

	unsigned int xattr_name_filter;
	unsigned int xattr_shared_count;
	unsigned int *xattr_shared_xattrs;

	union {
		erofs_blk_t raw_blkaddr;
		struct {
			unsigned short chunkformat;
			unsigned char chunkbits;
		};
#ifdef CONFIG_EROFS_FS_ZIP
		struct {
			unsigned short z_advise;
			unsigned char z_algorithmtype[2];
			unsigned char z_logical_clusterbits;
			unsigned long z_tailextent_headlcn;
			union {
				struct {
					erofs_off_t z_idataoff;
					unsigned short z_idata_size;
				};
				erofs_off_t z_fragmentoff;
			};
		};
#endif	/* CONFIG_EROFS_FS_ZIP */
	};
	/* the corresponding vfs inode */
	struct inode vfs_inode;
};

#define EROFS_I(ptr)	container_of(ptr, struct erofs_inode, vfs_inode)

static inline erofs_off_t erofs_iloc(struct inode *inode)
{
	struct erofs_sb_info *sbi = EROFS_I_SB(inode);

	return erofs_pos(inode->i_sb, sbi->meta_blkaddr) +
		(EROFS_I(inode)->nid << sbi->islotbits);
}

static inline unsigned int erofs_inode_version(unsigned int ifmt)
{
	return (ifmt >> EROFS_I_VERSION_BIT) & EROFS_I_VERSION_MASK;
}

static inline unsigned int erofs_inode_datalayout(unsigned int ifmt)
{
	return (ifmt >> EROFS_I_DATALAYOUT_BIT) & EROFS_I_DATALAYOUT_MASK;
}

/* reclaiming is never triggered when allocating new folios. */
static inline struct folio *erofs_grab_folio_nowait(struct address_space *as,
						    pgoff_t index)
{
	return __filemap_get_folio(as, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			readahead_gfp_mask(as) & ~__GFP_RECLAIM);
}

/* Has a disk mapping */
#define EROFS_MAP_MAPPED	0x0001
/* Located in metadata (could be copied from bd_inode) */
#define EROFS_MAP_META		0x0002
/* The extent is encoded */
#define EROFS_MAP_ENCODED	0x0004
/* The length of extent is full */
#define EROFS_MAP_FULL_MAPPED	0x0008
/* Located in the special packed inode */
#define EROFS_MAP_FRAGMENT	0x0010
/* The extent refers to partial decompressed data */
#define EROFS_MAP_PARTIAL_REF	0x0020

struct erofs_map_blocks {
	struct erofs_buf buf;

	erofs_off_t m_pa, m_la;
	u64 m_plen, m_llen;

	unsigned short m_deviceid;
	char m_algorithmformat;
	unsigned int m_flags;
};

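/*
 * Illustrative mapping query (a sketch only; the locals are hypothetical and
 * the actual erofs_map_blocks() callers remain the authoritative reference):
 *
 *	struct erofs_map_blocks map = { .buf = __EROFS_BUF_INITIALIZER };
 *
 *	map.m_la = pos;			// logical offset being queried
 *	err = erofs_map_blocks(inode, &map);
 *	if (!err && (map.m_flags & EROFS_MAP_MAPPED))
 *		...;			// extent starts at map.m_pa, map.m_plen bytes long
 *	erofs_put_metabuf(&map.buf);	// drop any metadata buffer pinned by the lookup
 */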
/*
 * Used to get the exact decompressed length, e.g. fiemap (consider the
 * lookback approach instead if possible since it is lighter on metadata.)
 */
#define EROFS_GET_BLOCKS_FIEMAP		0x0001
/* Used to map the whole extent if non-negligible data is requested for LZMA */
#define EROFS_GET_BLOCKS_READMORE	0x0002
/* Used to map tail extent for tailpacking inline or fragment pcluster */
#define EROFS_GET_BLOCKS_FINDTAIL	0x0004

enum {
	Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX,
	Z_EROFS_COMPRESSION_INTERLACED,
	Z_EROFS_COMPRESSION_RUNTIME_MAX
};

struct erofs_map_dev {
	struct super_block *m_sb;
	struct erofs_device_info *m_dif;
	struct block_device *m_bdev;

	erofs_off_t m_pa;
	unsigned int m_deviceid;
};

extern const struct super_operations erofs_sops;

extern const struct address_space_operations erofs_aops;
extern const struct address_space_operations erofs_fileio_aops;
extern const struct address_space_operations z_erofs_aops;
extern const struct address_space_operations erofs_fscache_access_aops;

extern const struct inode_operations erofs_generic_iops;
extern const struct inode_operations erofs_symlink_iops;
extern const struct inode_operations erofs_fast_symlink_iops;
extern const struct inode_operations erofs_dir_iops;

extern const struct file_operations erofs_file_fops;
extern const struct file_operations erofs_dir_fops;

extern const struct iomap_ops z_erofs_iomap_report_ops;

/* flags for erofs_fscache_register_cookie() */
#define EROFS_REG_COOKIE_SHARE		0x0001
#define EROFS_REG_COOKIE_NEED_NOEXIST	0x0002

void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
			  erofs_off_t *offset, int *lengthp);
void erofs_unmap_metabuf(struct erofs_buf *buf);
void erofs_put_metabuf(struct erofs_buf *buf);
void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset,
		  enum erofs_kmap_type type);
void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb);
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_off_t offset, enum erofs_kmap_type type);
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *dev);
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len);
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map);
void erofs_onlinefolio_init(struct folio *folio);
void erofs_onlinefolio_split(struct folio *folio);
void erofs_onlinefolio_end(struct folio *folio, int err);
struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid);
int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
		  struct kstat *stat, u32 request_mask,
		  unsigned int query_flags);
int erofs_namei(struct inode *dir, const struct qstr *name,
		erofs_nid_t *nid, unsigned int *d_type);

static inline void *erofs_vm_map_ram(struct page **pages, unsigned int count)
{
	int retried = 0;

	while (1) {
		void *p = vm_map_ram(pages, count, -1);

		/* retry two more times (3 times in total) */
		if (p || ++retried >= 3)
			return p;
		vm_unmap_aliases();
	}
	return NULL;
}

int erofs_register_sysfs(struct super_block *sb);
void erofs_unregister_sysfs(struct super_block *sb);
int __init erofs_init_sysfs(void);
void erofs_exit_sysfs(void);

struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv);
static inline struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
{
	return __erofs_allocpage(pagepool, gfp, false);
}
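/*
 * A pagepool is a simple singly-linked list of spare pages chained through
 * page->private; erofs_release_pages() below releases whatever is left on it.
 */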
static inline void erofs_pagepool_add(struct page **pagepool, struct page *page)
{
	set_page_private(page, (unsigned long)*pagepool);
	*pagepool = page;
}
void erofs_release_pages(struct page **pagepool);

#ifdef CONFIG_EROFS_FS_ZIP
#define MNGD_MAPPING(sbi)	((sbi)->managed_cache->i_mapping)

extern atomic_long_t erofs_global_shrink_cnt;
void erofs_shrinker_register(struct super_block *sb);
void erofs_shrinker_unregister(struct super_block *sb);
int __init erofs_init_shrinker(void);
void erofs_exit_shrinker(void);
int __init z_erofs_init_subsystem(void);
void z_erofs_exit_subsystem(void);
unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
				  unsigned long nr_shrink);
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
			    int flags);
void *z_erofs_get_gbuf(unsigned int requiredpages);
void z_erofs_put_gbuf(void *ptr);
int z_erofs_gbuf_growsize(unsigned int nrpages);
int __init z_erofs_gbuf_init(void);
void z_erofs_gbuf_exit(void);
int erofs_init_managed_cache(struct super_block *sb);
int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb);
#else
static inline void erofs_shrinker_register(struct super_block *sb) {}
static inline void erofs_shrinker_unregister(struct super_block *sb) {}
static inline int erofs_init_shrinker(void) { return 0; }
static inline void erofs_exit_shrinker(void) {}
static inline int z_erofs_init_subsystem(void) { return 0; }
static inline void z_erofs_exit_subsystem(void) {}
static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#endif	/* !CONFIG_EROFS_FS_ZIP */

#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev);
void erofs_fileio_submit_bio(struct bio *bio);
#else
static inline struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev) { return NULL; }
static inline void erofs_fileio_submit_bio(struct bio *bio) {}
#endif

#ifdef CONFIG_EROFS_FS_ONDEMAND
int erofs_fscache_register_fs(struct super_block *sb);
void erofs_fscache_unregister_fs(struct super_block *sb);

struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
						    char *name, unsigned int flags);
void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache);
struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev);
void erofs_fscache_submit_bio(struct bio *bio);
#else
static inline int erofs_fscache_register_fs(struct super_block *sb)
{
	return -EOPNOTSUPP;
}
static inline void erofs_fscache_unregister_fs(struct super_block *sb) {}

static inline
struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
						    char *name, unsigned int flags)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache)
{
}
static inline struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev) { return NULL; }
static inline void erofs_fscache_submit_bio(struct bio *bio) {}
#endif

#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif	/* __EROFS_INTERNAL_H */