// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "xattr.h"
#include <linux/compat.h>
#include <trace/events/erofs.h>

/*
 * Cache an inline ("fast") symlink target in inode->i_link.
 *
 * @inode: the symlink inode being set up
 * @bptr:  kaddr of the metadata block holding the on-disk inode
 * @ofs:   byte offset of the end of the on-disk inode within that block
 *
 * Only applies to EROFS_INODE_FLAT_INLINE symlinks whose tail data
 * (which follows the inode and its xattr area) fits entirely inside the
 * same block; other layouts are silently skipped and read via the page
 * cache instead.
 *
 * Returns 0 on success (including the skipped case), -ENOMEM on
 * allocation failure, or -EFSCORRUPTED if the target length is bogus.
 */
static int erofs_fill_symlink(struct inode *inode, void *bptr, unsigned int ofs)
{
	struct erofs_inode *vi = EROFS_I(inode);
	char *link;
	loff_t end;

	/* the inline tail data begins right after the xattr area */
	ofs += vi->xattr_isize;
	/* check whether the symlink data is small enough to be inlined */
	if (vi->datalayout == EROFS_INODE_FLAT_INLINE &&
	    !check_add_overflow(ofs, inode->i_size, &end) &&
	    end <= i_blocksize(inode)) {
		link = kmemdup_nul(bptr + ofs, inode->i_size, GFP_KERNEL);
		if (!link)
			return -ENOMEM;
		/* an empty target or an embedded NUL indicates corruption */
		if (unlikely(!inode->i_size || strlen(link) != inode->i_size)) {
			erofs_err(inode->i_sb, "invalid fast symlink size %llu @ nid %llu",
				  inode->i_size | 0ULL, vi->nid);
			kfree(link);
			return -EFSCORRUPTED;
		}
		inode_set_cached_link(inode, link, inode->i_size);
	}
	return 0;
}

/*
 * Read the on-disk inode (compact or extended layout) for @inode and
 * fill in both the generic VFS fields and the erofs-specific ones
 * (datalayout, startblk, chunk format, xattr size, ...).
 *
 * Returns 0 on success, -EOPNOTSUPP for unsupported on-disk formats,
 * -EFSCORRUPTED for invalid metadata, or the error from reading the
 * metadata buffer.
 */
static int erofs_read_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	erofs_blk_t blkaddr = erofs_blknr(sb, erofs_iloc(inode));
	unsigned int ofs = erofs_blkoff(sb, erofs_iloc(inode));
	bool in_mbox = erofs_inode_in_metabox(inode);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	/* valid start-block bits; narrowed to 32 for compact inodes below */
	erofs_blk_t addrmask = BIT_ULL(48) - 1;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_extended *die, copied;
	struct erofs_inode_compact *dic;
	unsigned int ifmt;
	void *ptr;
	int err = 0;

	ptr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr), in_mbox);
	if (IS_ERR(ptr)) {
		err = PTR_ERR(ptr);
		erofs_err(sb, "failed to read inode meta block (nid: %llu): %d",
			  vi->nid, err);
		goto err_out;
	}

	dic = ptr + ofs;
	ifmt = le16_to_cpu(dic->i_format);
	/* reject format bits this implementation doesn't know about */
	if (ifmt & ~EROFS_I_ALL) {
		erofs_err(sb, "unsupported i_format %u of nid %llu",
			  ifmt, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the extended inode crosses the block boundary */
		if (ofs + vi->inode_isize <= sb->s_blocksize) {
			/* fully in this block: read it in place */
			ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
			copied.i_u = die->i_u;
			copied.i_nb = die->i_nb;
		} else {
			/* split across two blocks: stitch into `copied` */
			const unsigned int gotten = sb->s_blocksize - ofs;

			memcpy(&copied, dic, gotten);
			ptr = erofs_read_metabuf(&buf, sb,
						 erofs_pos(sb, blkaddr + 1), in_mbox);
			if (IS_ERR(ptr)) {
				err = PTR_ERR(ptr);
				erofs_err(sb, "failed to read inode payload block (nid: %llu): %d",
					  vi->nid, err);
				goto err_out;
			}
			/* ofs becomes the end offset within the 2nd block */
			ofs = vi->inode_isize - gotten;
			memcpy((u8 *)&copied + gotten, ptr, ofs);
			die = &copied;
		}
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));
		/* extended inodes carry a full 64-bit mtime */
		inode_set_mtime(inode, le64_to_cpu(die->i_mtime),
				le32_to_cpu(die->i_mtime_nsec));

		inode->i_size = le64_to_cpu(die->i_size);
		break;
	case EROFS_INODE_LAYOUT_COMPACT:
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		copied.i_u = dic->i_u;
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		if (!S_ISDIR(inode->i_mode) &&
		    ((ifmt >> EROFS_I_NLINK_1_BIT) & 1)) {
			/* nlink==1 flag set: i_nb holds startblk_hi instead */
			set_nlink(inode, 1);
			copied.i_nb = dic->i_nb;
		} else {
			/* i_nb holds the link count; only 32-bit startblk */
			set_nlink(inode, le16_to_cpu(dic->i_nb.nlink));
			copied.i_nb.startblk_hi = 0;
			addrmask = BIT_ULL(32) - 1;
		}
		/* compact mtime is a 32-bit offset from the sb epoch */
		inode_set_mtime(inode, sbi->epoch + le32_to_cpu(dic->i_mtime),
				sbi->fixed_nsec);

		inode->i_size = le32_to_cpu(dic->i_size);
		break;
	default:
		erofs_err(sb, "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (unlikely(inode->i_size < 0)) {
		erofs_err(sb, "negative i_size @ nid %llu", vi->nid);
		err = -EFSCORRUPTED;
		goto err_out;
	}

	if (IS_ENABLED(CONFIG_EROFS_FS_POSIX_ACL) &&
	    erofs_inode_has_noacl(inode, ptr, ofs))
		cache_no_acl(inode);

	switch (inode->i_mode & S_IFMT) {
	case S_IFDIR:
		vi->dot_omitted = (ifmt >> EROFS_I_DOT_OMITTED_BIT) & 1;
		fallthrough;
	case S_IFREG:
	case S_IFLNK:
		vi->startblk = le32_to_cpu(copied.i_u.startblk_lo) |
			((u64)le16_to_cpu(copied.i_nb.startblk_hi) << 32);
		/* treat an all-ones startblk (within addrmask) as "no data" */
		if (vi->datalayout == EROFS_INODE_FLAT_PLAIN &&
		    !((vi->startblk ^ EROFS_NULL_ADDR) & addrmask))
			vi->startblk = EROFS_NULL_ADDR;

		if(S_ISLNK(inode->i_mode)) {
			err = erofs_fill_symlink(inode, ptr, ofs);
			if (err)
				goto err_out;
		}
		break;
	case S_IFCHR:
	case S_IFBLK:
		inode->i_rdev = new_decode_dev(le32_to_cpu(copied.i_u.rdev));
		break;
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_rdev = 0;
		break;
	default:
		erofs_err(sb, "bogus i_mode (%o) @ nid %llu", inode->i_mode,
			  vi->nid);
		err = -EFSCORRUPTED;
		goto err_out;
	}

	if (!erofs_inode_is_data_compressed(vi->datalayout)) {
		/* uncompressed: i_blocks derives directly from i_size */
		inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9;
	} else if (!IS_ENABLED(CONFIG_EROFS_FS_ZIP) || !sbi->available_compr_algs) {
		erofs_err(sb, "compressed inode (nid %llu) is invalid in a plain filesystem",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto err_out;
	} else {
		/* compressed: on-disk block count, converted to 512B units */
		inode->i_blocks = le32_to_cpu(copied.i_u.blocks_lo) <<
			(sb->s_blocksize_bits - 9);
	}

	if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
		/* fill chunked inode summary info */
		vi->chunkformat = le16_to_cpu(copied.i_u.c.format);
		if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
			erofs_err(sb, "unsupported chunk format %x of nid %llu",
				  vi->chunkformat, vi->nid);
			err = -EOPNOTSUPP;
			goto err_out;
		}
		vi->chunkbits = sb->s_blocksize_bits +
			(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	}
	/* EROFS only stores mtime; mirror it into atime and ctime */
	inode_set_atime_to_ts(inode,
			      inode_set_ctime_to_ts(inode, inode_get_mtime(inode)));

	inode->i_flags &= ~S_DAX;
	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
	    (vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
	     vi->datalayout == EROFS_INODE_CHUNK_BASED))
		inode->i_flags |= S_DAX;
err_out:
	erofs_put_metabuf(&buf);
	return err;
}

/*
 * Finish setting up a freshly-read inode: parse the on-disk inode, then
 * install the inode/file operations and address space ops that match its
 * file type.
 *
 * Returns 0 on success or a negative errno from erofs_read_inode() /
 * erofs_inode_set_aops().
 */
static int erofs_fill_inode(struct inode *inode)
{
	int err;

	trace_erofs_fill_inode(inode);
	err = erofs_read_inode(inode);
	if (err)
		return err;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		inode->i_fop = erofs_ishare_fill_inode(inode) ?
			&erofs_ishare_fops : &erofs_file_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		inode_nohighmem(inode);
		break;
	case S_IFLNK:
		/* i_link is only set for fast symlinks (see erofs_fill_symlink) */
		if (inode->i_link)
			inode->i_op = &erofs_fast_symlink_iops;
		else
			inode->i_op = &erofs_symlink_iops;
		inode_nohighmem(inode);
		break;
	default:
		/* special files need no aops; done after init_special_inode */
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		return 0;
	}

	mapping_set_large_folios(inode->i_mapping);
	return erofs_inode_set_aops(inode, inode, false);
}

/*
 * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
 * so that it will fit.
263 */ 264 static ino_t erofs_squash_ino(struct super_block *sb, erofs_nid_t nid) 265 { 266 u64 ino64 = erofs_nid_to_ino64(EROFS_SB(sb), nid); 267 268 if (sizeof(ino_t) < sizeof(erofs_nid_t)) 269 ino64 ^= ino64 >> (sizeof(erofs_nid_t) - sizeof(ino_t)) * 8; 270 return (ino_t)ino64; 271 } 272 273 static int erofs_iget5_eq(struct inode *inode, void *opaque) 274 { 275 return EROFS_I(inode)->nid == *(erofs_nid_t *)opaque; 276 } 277 278 static int erofs_iget5_set(struct inode *inode, void *opaque) 279 { 280 const erofs_nid_t nid = *(erofs_nid_t *)opaque; 281 282 inode->i_ino = erofs_squash_ino(inode->i_sb, nid); 283 EROFS_I(inode)->nid = nid; 284 return 0; 285 } 286 287 struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid) 288 { 289 struct inode *inode; 290 291 inode = iget5_locked(sb, erofs_squash_ino(sb, nid), erofs_iget5_eq, 292 erofs_iget5_set, &nid); 293 if (!inode) 294 return ERR_PTR(-ENOMEM); 295 296 if (inode_state_read_once(inode) & I_NEW) { 297 int err = erofs_fill_inode(inode); 298 299 if (err) { 300 iget_failed(inode); 301 return ERR_PTR(err); 302 } 303 unlock_new_inode(inode); 304 } 305 return inode; 306 } 307 308 int erofs_getattr(struct mnt_idmap *idmap, const struct path *path, 309 struct kstat *stat, u32 request_mask, 310 unsigned int query_flags) 311 { 312 struct inode *const inode = d_inode(path->dentry); 313 struct block_device *bdev = inode->i_sb->s_bdev; 314 bool compressed = 315 erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout); 316 317 if (compressed) 318 stat->attributes |= STATX_ATTR_COMPRESSED; 319 stat->attributes |= STATX_ATTR_IMMUTABLE; 320 stat->attributes_mask |= (STATX_ATTR_COMPRESSED | 321 STATX_ATTR_IMMUTABLE); 322 323 /* 324 * Return the DIO alignment restrictions if requested. 325 * 326 * In EROFS, STATX_DIOALIGN is only supported in bdev-based mode 327 * and uncompressed inodes, otherwise we report no DIO support. 
328 */ 329 if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) { 330 stat->result_mask |= STATX_DIOALIGN; 331 if (bdev && !compressed) { 332 stat->dio_mem_align = bdev_dma_alignment(bdev) + 1; 333 stat->dio_offset_align = bdev_logical_block_size(bdev); 334 } 335 } 336 generic_fillattr(idmap, request_mask, inode, stat); 337 return 0; 338 } 339 340 static int erofs_ioctl_get_volume_label(struct inode *inode, void __user *arg) 341 { 342 struct erofs_sb_info *sbi = EROFS_I_SB(inode); 343 int ret; 344 345 if (!sbi->volume_name) 346 ret = clear_user(arg, 1); 347 else 348 ret = copy_to_user(arg, sbi->volume_name, 349 strlen(sbi->volume_name)); 350 return ret ? -EFAULT : 0; 351 } 352 353 long erofs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 354 { 355 struct inode *inode = file_inode(filp); 356 void __user *argp = (void __user *)arg; 357 358 switch (cmd) { 359 case FS_IOC_GETFSLABEL: 360 return erofs_ioctl_get_volume_label(inode, argp); 361 default: 362 return -ENOTTY; 363 } 364 } 365 366 #ifdef CONFIG_COMPAT 367 long erofs_compat_ioctl(struct file *filp, unsigned int cmd, 368 unsigned long arg) 369 { 370 return erofs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); 371 } 372 #endif 373 374 const struct inode_operations erofs_generic_iops = { 375 .getattr = erofs_getattr, 376 .listxattr = erofs_listxattr, 377 .get_inode_acl = erofs_get_acl, 378 .fiemap = erofs_fiemap, 379 }; 380 381 const struct inode_operations erofs_symlink_iops = { 382 .get_link = page_get_link, 383 .getattr = erofs_getattr, 384 .listxattr = erofs_listxattr, 385 .get_inode_acl = erofs_get_acl, 386 }; 387 388 const struct inode_operations erofs_fast_symlink_iops = { 389 .get_link = simple_get_link, 390 .getattr = erofs_getattr, 391 .listxattr = erofs_listxattr, 392 .get_inode_acl = erofs_get_acl, 393 }; 394