// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "xattr.h"

#include <trace/events/erofs.h>

/*
 * If the inode is read successfully, return its inode page (or sometimes
 * the inode payload page if it's an extended inode) in order to fill
 * inline data if possible.
 */
static void *erofs_read_inode(struct erofs_buf *buf,
			      struct inode *inode, unsigned int *ofs)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_inode *vi = EROFS_I(inode);
	const erofs_off_t inode_loc = iloc(sbi, vi->nid);

	erofs_blk_t blkaddr, nblks = 0;
	void *kaddr;
	struct erofs_inode_compact *dic;
	struct erofs_inode_extended *die, *copied = NULL;
	unsigned int ifmt;
	int err;

	blkaddr = erofs_blknr(inode_loc);
	*ofs = erofs_blkoff(inode_loc);

	erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
		  __func__, vi->nid, *ofs, blkaddr);

	kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
			  vi->nid, PTR_ERR(kaddr));
		return kaddr;
	}

	dic = kaddr + *ofs;
	ifmt = le16_to_cpu(dic->i_format);

	if (ifmt & ~EROFS_I_ALL) {
		erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
			  ifmt, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

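	/*
	 * Note: an on-disk inode is either compact (32 bytes) or extended
	 * (64 bytes); both layouts are defined in erofs_fs.h. Inode slots
	 * are allocated at 32-byte granularity, so a 64-byte extended
	 * inode may straddle two metadata blocks; in that case the switch
	 * below stitches the two halves into the 'copied' bounce buffer
	 * before parsing.
	 */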
	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the extended inode crosses a block boundary */
		if (*ofs + vi->inode_isize <= EROFS_BLKSIZ) {
			*ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
		} else {
			const unsigned int gotten = EROFS_BLKSIZ - *ofs;

			copied = kmalloc(vi->inode_isize, GFP_NOFS);
			if (!copied) {
				err = -ENOMEM;
				goto err_out;
			}
			memcpy(copied, dic, gotten);
			kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1,
						   EROFS_KMAP);
			if (IS_ERR(kaddr)) {
				erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
					  vi->nid, PTR_ERR(kaddr));
				kfree(copied);
				return kaddr;
			}
			*ofs = vi->inode_isize - gotten;
			memcpy((u8 *)copied + gotten, kaddr, *ofs);
			die = copied;
		}
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(die->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));

		/* extended inode has its own timestamp */
		inode->i_ctime.tv_sec = le64_to_cpu(die->i_mtime);
		inode->i_ctime.tv_nsec = le32_to_cpu(die->i_mtime_nsec);

		inode->i_size = le64_to_cpu(die->i_size);

		/* total blocks for compressed files */
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(die->i_u.compressed_blocks);
		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
			/* fill chunked inode summary info */
			vi->chunkformat = le16_to_cpu(die->i_u.c.format);
		kfree(copied);
		copied = NULL;
		break;
	case EROFS_INODE_LAYOUT_COMPACT:
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		*ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(dic->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		set_nlink(inode, le16_to_cpu(dic->i_nlink));

		/* use build time for compact inodes */
		inode->i_ctime.tv_sec = sbi->build_time;
		inode->i_ctime.tv_nsec = sbi->build_time_nsec;

		inode->i_size = le32_to_cpu(dic->i_size);
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(dic->i_u.compressed_blocks);
		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
			vi->chunkformat = le16_to_cpu(dic->i_u.c.format);
		break;
	default:
		erofs_err(inode->i_sb,
			  "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
		if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
			erofs_err(inode->i_sb,
				  "unsupported chunk format %x of nid %llu",
				  vi->chunkformat, vi->nid);
			err = -EOPNOTSUPP;
			goto err_out;
		}
		vi->chunkbits = LOG_BLOCK_SIZE +
			(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	}
	inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;

	inode->i_flags &= ~S_DAX;
	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
	    vi->datalayout == EROFS_INODE_FLAT_PLAIN)
		inode->i_flags |= S_DAX;
	if (!nblks)
		/* measure inode.i_blocks as generic filesystems do */
		inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
	else
		inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
	return kaddr;

bogusimode:
	erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
		  inode->i_mode, vi->nid);
	err = -EFSCORRUPTED;
err_out:
	DBG_BUGON(1);
	kfree(copied);
	erofs_put_metabuf(buf);
	return ERR_PTR(err);
}

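/*
 * Set up a fast symlink if the target is short enough to live inline in
 * the inode metadata block: the path is copied into a kmalloc'ed,
 * NUL-terminated i_link buffer so that simple_get_link() can resolve it
 * without further I/O. Longer or non-inline symlinks fall back to
 * erofs_symlink_iops, which reads the target via page_get_link().
 */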
static int erofs_fill_symlink(struct inode *inode, void *kaddr,
			      unsigned int m_pofs)
{
	struct erofs_inode *vi = EROFS_I(inode);
	char *lnk;

	/* if it cannot be handled with the fast symlink scheme */
	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
	    inode->i_size >= EROFS_BLKSIZ) {
		inode->i_op = &erofs_symlink_iops;
		return 0;
	}

	lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
	if (!lnk)
		return -ENOMEM;

	m_pofs += vi->xattr_isize;
	/* inline symlink data shouldn't cross a block boundary */
	if (m_pofs + inode->i_size > EROFS_BLKSIZ) {
		kfree(lnk);
		erofs_err(inode->i_sb,
			  "inline data cross block boundary @ nid %llu",
			  vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
	memcpy(lnk, kaddr + m_pofs, inode->i_size);
	lnk[inode->i_size] = '\0';

	inode->i_link = lnk;
	inode->i_op = &erofs_fast_symlink_iops;
	return 0;
}

static int erofs_fill_inode(struct inode *inode, int isdir)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	void *kaddr;
	unsigned int ofs;
	int err = 0;

	trace_erofs_fill_inode(inode, isdir);

	/* read inode base data from disk */
	kaddr = erofs_read_inode(&buf, inode, &ofs);
	if (IS_ERR(kaddr))
		return PTR_ERR(kaddr);

	/* setup the new inode */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		if (erofs_inode_is_data_compressed(vi->datalayout))
			inode->i_fop = &generic_ro_fops;
		else
			inode->i_fop = &erofs_file_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		break;
	case S_IFLNK:
		err = erofs_fill_symlink(inode, kaddr, ofs);
		if (err)
			goto out_unlock;
		inode_nohighmem(inode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		goto out_unlock;
	default:
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

	if (erofs_inode_is_data_compressed(vi->datalayout)) {
		err = z_erofs_fill_inode(inode);
		goto out_unlock;
	}
	inode->i_mapping->a_ops = &erofs_raw_access_aops;

out_unlock:
	erofs_put_metabuf(&buf);
	return err;
}

/*
 * erofs nid is 64 bits, but i_ino is 'unsigned long', so more work is
 * needed on 32-bit platforms to find the right inode.
 */
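/*
 * On 32-bit platforms the hash value cannot encode the whole nid, so two
 * different nids may collide on the same i_ino; iget5_locked() below
 * therefore pairs the hash with erofs_ilookup_test_actor(), which
 * compares against the full 64-bit nid kept in struct erofs_inode.
 */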
static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	return EROFS_I(inode)->nid == nid;
}

static int erofs_iget_set_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	inode->i_ino = erofs_inode_hash(nid);
	return 0;
}

static inline struct inode *erofs_iget_locked(struct super_block *sb,
					      erofs_nid_t nid)
{
	const unsigned long hashval = erofs_inode_hash(nid);

	return iget5_locked(sb, hashval, erofs_ilookup_test_actor,
		erofs_iget_set_actor, &nid);
}

struct inode *erofs_iget(struct super_block *sb,
			 erofs_nid_t nid,
			 bool isdir)
{
	struct inode *inode = erofs_iget_locked(sb, nid);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		int err;
		struct erofs_inode *vi = EROFS_I(inode);

		vi->nid = nid;

		err = erofs_fill_inode(inode, isdir);
		if (!err)
			unlock_new_inode(inode);
		else {
			iget_failed(inode);
			inode = ERR_PTR(err);
		}
	}
	return inode;
}

int erofs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		  struct kstat *stat, u32 request_mask,
		  unsigned int query_flags)
{
	struct inode *const inode = d_inode(path->dentry);

	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE);

	generic_fillattr(&init_user_ns, inode, stat);
	return 0;
}

const struct inode_operations erofs_generic_iops = {
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
	.fiemap = erofs_fiemap,
};

const struct inode_operations erofs_symlink_iops = {
	.get_link = page_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
};

const struct inode_operations erofs_fast_symlink_iops = {
	.get_link = simple_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
};
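
/*
 * All of the inode_operations tables above share erofs_getattr(), which
 * advertises STATX_ATTR_IMMUTABLE (EROFS is read-only) and, for
 * compressed inodes, STATX_ATTR_COMPRESSED.
 */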