// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "xattr.h"
#include <trace/events/erofs.h>

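/*
 * Fast symlinks: when the tail-packed (inline) symlink target plus the
 * inode metadata fits within a single block, duplicate the target into
 * ->i_link so it can later be resolved without extra page I/O.
 */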
static int erofs_fill_symlink(struct inode *inode, void *kaddr,
			      unsigned int m_pofs)
{
	struct erofs_inode *vi = EROFS_I(inode);
	loff_t off;

	m_pofs += vi->xattr_isize;
	/* check if it cannot be handled with fast symlink scheme */
	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
	    check_add_overflow(m_pofs, inode->i_size, &off) ||
	    off > i_blocksize(inode))
		return 0;

	inode->i_link = kmemdup_nul(kaddr + m_pofs, inode->i_size, GFP_KERNEL);
	return inode->i_link ? 0 : -ENOMEM;
}

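/*
 * Read the on-disk inode (compact or extended) and fill in the in-memory
 * inode fields; an extended inode that crosses a block boundary is copied
 * piecewise from the two metadata blocks involved.
 */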
static int erofs_read_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	erofs_blk_t blkaddr = erofs_blknr(sb, erofs_iloc(inode));
	unsigned int ofs = erofs_blkoff(sb, erofs_iloc(inode));
	bool in_mbox = erofs_inode_in_metabox(inode);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	erofs_blk_t addrmask = BIT_ULL(48) - 1;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_extended *die, copied;
	struct erofs_inode_compact *dic;
	unsigned int ifmt;
	void *ptr;
	int err = 0;

	ptr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr), in_mbox);
	if (IS_ERR(ptr)) {
		err = PTR_ERR(ptr);
		erofs_err(sb, "failed to read inode meta block (nid: %llu): %d",
			  vi->nid, err);
		goto err_out;
	}

	dic = ptr + ofs;
	ifmt = le16_to_cpu(dic->i_format);
	if (ifmt & ~EROFS_I_ALL) {
		erofs_err(sb, "unsupported i_format %u of nid %llu",
			  ifmt, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the extended inode crosses the block boundary */
		if (ofs + vi->inode_isize <= sb->s_blocksize) {
			ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
			copied.i_u = die->i_u;
			copied.i_nb = die->i_nb;
		} else {
			const unsigned int gotten = sb->s_blocksize - ofs;

			memcpy(&copied, dic, gotten);
			ptr = erofs_read_metabuf(&buf, sb,
					erofs_pos(sb, blkaddr + 1), in_mbox);
			if (IS_ERR(ptr)) {
				err = PTR_ERR(ptr);
				erofs_err(sb, "failed to read inode payload block (nid: %llu): %d",
					  vi->nid, err);
				goto err_out;
			}
			ofs = vi->inode_isize - gotten;
			memcpy((u8 *)&copied + gotten, ptr, ofs);
			die = &copied;
		}
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));
		inode_set_mtime(inode, le64_to_cpu(die->i_mtime),
				le32_to_cpu(die->i_mtime_nsec));

		inode->i_size = le64_to_cpu(die->i_size);
		break;
	case EROFS_INODE_LAYOUT_COMPACT:
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		copied.i_u = dic->i_u;
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		if (!S_ISDIR(inode->i_mode) &&
		    ((ifmt >> EROFS_I_NLINK_1_BIT) & 1)) {
			set_nlink(inode, 1);
			copied.i_nb = dic->i_nb;
		} else {
			set_nlink(inode, le16_to_cpu(dic->i_nb.nlink));
			copied.i_nb.startblk_hi = 0;
			addrmask = BIT_ULL(32) - 1;
		}
		inode_set_mtime(inode, sbi->epoch + le32_to_cpu(dic->i_mtime),
				sbi->fixed_nsec);

		inode->i_size = le32_to_cpu(dic->i_size);
		break;
	default:
		erofs_err(sb, "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (unlikely(inode->i_size < 0)) {
		erofs_err(sb, "negative i_size @ nid %llu", vi->nid);
		err = -EFSCORRUPTED;
		goto err_out;
	}
	switch (inode->i_mode & S_IFMT) {
	case S_IFDIR:
		vi->dot_omitted = (ifmt >> EROFS_I_DOT_OMITTED_BIT) & 1;
		fallthrough;
	case S_IFREG:
	case S_IFLNK:
		vi->startblk = le32_to_cpu(copied.i_u.startblk_lo) |
			((u64)le16_to_cpu(copied.i_nb.startblk_hi) << 32);
		if (vi->datalayout == EROFS_INODE_FLAT_PLAIN &&
		    !((vi->startblk ^ EROFS_NULL_ADDR) & addrmask))
			vi->startblk = EROFS_NULL_ADDR;

		if (S_ISLNK(inode->i_mode)) {
			err = erofs_fill_symlink(inode, ptr, ofs);
			if (err)
				goto err_out;
		}
		break;
	case S_IFCHR:
	case S_IFBLK:
		inode->i_rdev = new_decode_dev(le32_to_cpu(copied.i_u.rdev));
		break;
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_rdev = 0;
		break;
	default:
		erofs_err(sb, "bogus i_mode (%o) @ nid %llu", inode->i_mode,
			  vi->nid);
		err = -EFSCORRUPTED;
		goto err_out;
	}

	if (erofs_inode_is_data_compressed(vi->datalayout))
		inode->i_blocks = le32_to_cpu(copied.i_u.blocks_lo) <<
					(sb->s_blocksize_bits - 9);
	else
		inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9;

	if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
		/* fill chunked inode summary info */
		vi->chunkformat = le16_to_cpu(copied.i_u.c.format);
		if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
			erofs_err(sb, "unsupported chunk format %x of nid %llu",
				  vi->chunkformat, vi->nid);
			err = -EOPNOTSUPP;
			goto err_out;
		}
		vi->chunkbits = sb->s_blocksize_bits +
			(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	}
	inode_set_atime_to_ts(inode,
			      inode_set_ctime_to_ts(inode, inode_get_mtime(inode)));

	inode->i_flags &= ~S_DAX;
	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
	    (vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
	     vi->datalayout == EROFS_INODE_CHUNK_BASED))
		inode->i_flags |= S_DAX;
err_out:
	erofs_put_metabuf(&buf);
	return err;
}

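/*
 * Hook up inode, file and address_space operations according to the file
 * type and data layout once the on-disk inode has been read in.
 */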
static int erofs_fill_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);
	int err;

	trace_erofs_fill_inode(inode);
	err = erofs_read_inode(inode);
	if (err)
		return err;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		if (erofs_inode_is_data_compressed(vi->datalayout))
			inode->i_fop = &generic_ro_fops;
		else
			inode->i_fop = &erofs_file_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		inode_nohighmem(inode);
		break;
	case S_IFLNK:
		if (inode->i_link)
			inode->i_op = &erofs_fast_symlink_iops;
		else
			inode->i_op = &erofs_symlink_iops;
		inode_nohighmem(inode);
		break;
	default:
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		return 0;
	}

	mapping_set_large_folios(inode->i_mapping);
	if (erofs_inode_is_data_compressed(vi->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		DO_ONCE_LITE_IF(inode->i_blkbits != PAGE_SHIFT,
				erofs_info, inode->i_sb,
				"EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!");
		inode->i_mapping->a_ops = &z_erofs_aops;
#else
		err = -EOPNOTSUPP;
#endif
	} else {
		inode->i_mapping->a_ops = &erofs_aops;
#ifdef CONFIG_EROFS_FS_ONDEMAND
		if (erofs_is_fscache_mode(inode->i_sb))
			inode->i_mapping->a_ops = &erofs_fscache_access_aops;
#endif
#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
		if (erofs_is_fileio_mode(EROFS_SB(inode->i_sb)))
			inode->i_mapping->a_ops = &erofs_fileio_aops;
#endif
	}

	return err;
}

/*
 * ino_t is 32 bits on a 32-bit arch. We have to squash the 64-bit value down
 * so that it will fit.
 */
static ino_t erofs_squash_ino(struct super_block *sb, erofs_nid_t nid)
{
	u64 ino64 = erofs_nid_to_ino64(EROFS_SB(sb), nid);

	if (sizeof(ino_t) < sizeof(erofs_nid_t))
		ino64 ^= ino64 >> (sizeof(erofs_nid_t) - sizeof(ino_t)) * 8;
	return (ino_t)ino64;
}

static int erofs_iget5_eq(struct inode *inode, void *opaque)
{
	return EROFS_I(inode)->nid == *(erofs_nid_t *)opaque;
}

static int erofs_iget5_set(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	inode->i_ino = erofs_squash_ino(inode->i_sb, nid);
	EROFS_I(inode)->nid = nid;
	return 0;
}

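/*
 * Look up (or instantiate) an inode by nid; iget5 is used since a 64-bit
 * nid may not fit into i_ino on 32-bit platforms (see erofs_squash_ino).
 */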
struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid)
{
	struct inode *inode;

	inode = iget5_locked(sb, erofs_squash_ino(sb, nid), erofs_iget5_eq,
			     erofs_iget5_set, &nid);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		int err = erofs_fill_inode(inode);

		if (err) {
			iget_failed(inode);
			return ERR_PTR(err);
		}
		unlock_new_inode(inode);
	}
	return inode;
}

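/*
 * Since EROFS is read-only, STATX_ATTR_IMMUTABLE is always reported;
 * STATX_ATTR_COMPRESSED is set for compressed data layouts.
 */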
int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
		  struct kstat *stat, u32 request_mask,
		  unsigned int query_flags)
{
	struct inode *const inode = d_inode(path->dentry);
	struct block_device *bdev = inode->i_sb->s_bdev;
	bool compressed =
		erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout);

	if (compressed)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE);

	/*
	 * Return the DIO alignment restrictions if requested.
	 *
	 * In EROFS, STATX_DIOALIGN is only supported for uncompressed
	 * inodes in bdev-based mode; otherwise no DIO support is reported.
	 */
	if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
		stat->result_mask |= STATX_DIOALIGN;
		if (bdev && !compressed) {
			stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
			stat->dio_offset_align = bdev_logical_block_size(bdev);
		}
	}
	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}

const struct inode_operations erofs_generic_iops = {
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
	.fiemap = erofs_fiemap,
};

const struct inode_operations erofs_symlink_iops = {
	.get_link = page_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};

const struct inode_operations erofs_fast_symlink_iops = {
	.get_link = simple_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};