xref: /linux/fs/erofs/inode.c (revision b1a54551dd9ed5ef1763b97b35a0999ca002b95c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "xattr.h"

#include <trace/events/erofs.h>

static void *erofs_read_inode(struct erofs_buf *buf,
			      struct inode *inode, unsigned int *ofs)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_inode *vi = EROFS_I(inode);
	const erofs_off_t inode_loc = erofs_iloc(inode);
	erofs_blk_t blkaddr, nblks = 0;
	void *kaddr;
	struct erofs_inode_compact *dic;
	struct erofs_inode_extended *die, *copied = NULL;
	union erofs_inode_i_u iu;
	unsigned int ifmt;
	int err;

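	/* split the inode's on-disk location into a metadata block and an in-block offset */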
	blkaddr = erofs_blknr(sb, inode_loc);
	*ofs = erofs_blkoff(sb, inode_loc);

	kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
			  vi->nid, PTR_ERR(kaddr));
		return kaddr;
	}

	dic = kaddr + *ofs;
	ifmt = le16_to_cpu(dic->i_format);
	if (ifmt & ~EROFS_I_ALL) {
		erofs_err(sb, "unsupported i_format %u of nid %llu",
			  ifmt, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the extended inode crosses a block boundary */
		if (*ofs + vi->inode_isize <= sb->s_blocksize) {
			*ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
		} else {
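			/*
			 * the on-disk inode straddles a block boundary:
			 * copy what is in this block first, then read the
			 * remainder from the following block
			 */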
			const unsigned int gotten = sb->s_blocksize - *ofs;

			copied = kmalloc(vi->inode_isize, GFP_NOFS);
			if (!copied) {
				err = -ENOMEM;
				goto err_out;
			}
			memcpy(copied, dic, gotten);
			kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1,
						   EROFS_KMAP);
			if (IS_ERR(kaddr)) {
				erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
					  vi->nid, PTR_ERR(kaddr));
				kfree(copied);
				return kaddr;
			}
			*ofs = vi->inode_isize - gotten;
			memcpy((u8 *)copied + gotten, kaddr, *ofs);
			die = copied;
		}
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		iu = die->i_u;
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));
		/* each extended inode has its own timestamp */
		inode_set_ctime(inode, le64_to_cpu(die->i_mtime),
				le32_to_cpu(die->i_mtime_nsec));

		inode->i_size = le64_to_cpu(die->i_size);
		kfree(copied);
		copied = NULL;
		break;
	case EROFS_INODE_LAYOUT_COMPACT:
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		*ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		iu = dic->i_u;
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		set_nlink(inode, le16_to_cpu(dic->i_nlink));
		/* use build time for compact inodes */
		inode_set_ctime(inode, sbi->build_time, sbi->build_time_nsec);

		inode->i_size = le32_to_cpu(dic->i_size);
		break;
	default:
		erofs_err(sb, "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		vi->raw_blkaddr = le32_to_cpu(iu.raw_blkaddr);
		break;
	case S_IFCHR:
	case S_IFBLK:
		inode->i_rdev = new_decode_dev(le32_to_cpu(iu.rdev));
		break;
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_rdev = 0;
		break;
	default:
		erofs_err(sb, "bogus i_mode (%o) @ nid %llu", inode->i_mode,
			  vi->nid);
		err = -EFSCORRUPTED;
		goto err_out;
	}

	/* total blocks for compressed files */
	if (erofs_inode_is_data_compressed(vi->datalayout)) {
		nblks = le32_to_cpu(iu.compressed_blocks);
	} else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
		/* fill chunked inode summary info */
		vi->chunkformat = le16_to_cpu(iu.c.format);
		if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
			erofs_err(sb, "unsupported chunk format %x of nid %llu",
				  vi->chunkformat, vi->nid);
			err = -EOPNOTSUPP;
			goto err_out;
		}
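		/* chunk size is the block size shifted by the low format bits */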
		vi->chunkbits = sb->s_blocksize_bits +
			(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	}
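	/* EROFS keeps a single timestamp; mirror ctime into atime and mtime */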
	inode_set_mtime_to_ts(inode,
			      inode_set_atime_to_ts(inode, inode_get_ctime(inode)));

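	/* only flat or chunk-based regular files may use DAX, and only with DAX_ALWAYS */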
	inode->i_flags &= ~S_DAX;
	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
	    (vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
	     vi->datalayout == EROFS_INODE_CHUNK_BASED))
		inode->i_flags |= S_DAX;

	if (!nblks)
		/* measure inode->i_blocks in 512-byte sectors as generic filesystems do */
		inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9;
	else
		inode->i_blocks = nblks << (sb->s_blocksize_bits - 9);
	return kaddr;

err_out:
	DBG_BUGON(1);
	kfree(copied);
	erofs_put_metabuf(buf);
	return ERR_PTR(err);
}

static int erofs_fill_symlink(struct inode *inode, void *kaddr,
			      unsigned int m_pofs)
{
	struct erofs_inode *vi = EROFS_I(inode);
	unsigned int bsz = i_blocksize(inode);
	char *lnk;

	/* if it cannot be handled with the fast symlink scheme */
	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
	    inode->i_size >= bsz || inode->i_size < 0) {
		inode->i_op = &erofs_symlink_iops;
		return 0;
	}

	lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
	if (!lnk)
		return -ENOMEM;

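	/* the inline symlink target follows the inode and its xattrs in the same block */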
	m_pofs += vi->xattr_isize;
	/* inline symlink data shouldn't cross block boundary */
	if (m_pofs + inode->i_size > bsz) {
		kfree(lnk);
		erofs_err(inode->i_sb,
			  "inline data cross block boundary @ nid %llu",
			  vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
	memcpy(lnk, kaddr + m_pofs, inode->i_size);
	lnk[inode->i_size] = '\0';

	inode->i_link = lnk;
	inode->i_op = &erofs_fast_symlink_iops;
	return 0;
}

static int erofs_fill_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	void *kaddr;
	unsigned int ofs;
	int err = 0;

	trace_erofs_fill_inode(inode);

	/* read inode base data from disk */
	kaddr = erofs_read_inode(&buf, inode, &ofs);
	if (IS_ERR(kaddr))
		return PTR_ERR(kaddr);

	/* setup the new inode */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		if (erofs_inode_is_data_compressed(vi->datalayout))
			inode->i_fop = &generic_ro_fops;
		else
			inode->i_fop = &erofs_file_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		inode_nohighmem(inode);
		break;
	case S_IFLNK:
		err = erofs_fill_symlink(inode, kaddr, ofs);
		if (err)
			goto out_unlock;
		inode_nohighmem(inode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		goto out_unlock;
	default:
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

	if (erofs_inode_is_data_compressed(vi->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
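		/*
		 * compressed inodes use the z_erofs address space;
		 * fscache mode falls through to -EOPNOTSUPP below
		 */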
		if (!erofs_is_fscache_mode(inode->i_sb)) {
			DO_ONCE_LITE_IF(inode->i_sb->s_blocksize != PAGE_SIZE,
				  erofs_info, inode->i_sb,
				  "EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!");
			inode->i_mapping->a_ops = &z_erofs_aops;
			err = 0;
			goto out_unlock;
		}
#endif
		err = -EOPNOTSUPP;
		goto out_unlock;
	}
	inode->i_mapping->a_ops = &erofs_raw_access_aops;
	mapping_set_large_folios(inode->i_mapping);
#ifdef CONFIG_EROFS_FS_ONDEMAND
	if (erofs_is_fscache_mode(inode->i_sb))
		inode->i_mapping->a_ops = &erofs_fscache_access_aops;
#endif

out_unlock:
	erofs_put_metabuf(&buf);
	return err;
}

/*
 * ino_t is only 32 bits wide on 32-bit arches. We have to squash the
 * 64-bit nid down so that it will fit.
 */
static ino_t erofs_squash_ino(erofs_nid_t nid)
{
	ino_t ino = (ino_t)nid;

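	/* fold the high bits of the 64-bit nid into the 32-bit ino to reduce collisions */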
	if (sizeof(ino_t) < sizeof(erofs_nid_t))
		ino ^= nid >> (sizeof(erofs_nid_t) - sizeof(ino_t)) * 8;
	return ino;
}

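/* iget5_locked() callbacks: inodes are keyed and matched by their on-disk nid */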
static int erofs_iget5_eq(struct inode *inode, void *opaque)
{
	return EROFS_I(inode)->nid == *(erofs_nid_t *)opaque;
}

static int erofs_iget5_set(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	inode->i_ino = erofs_squash_ino(nid);
	EROFS_I(inode)->nid = nid;
	return 0;
}

struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid)
{
	struct inode *inode;

	inode = iget5_locked(sb, erofs_squash_ino(nid), erofs_iget5_eq,
			     erofs_iget5_set, &nid);
	if (!inode)
		return ERR_PTR(-ENOMEM);

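	/* only newly allocated inodes need to be read from disk and filled in */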
	if (inode->i_state & I_NEW) {
		int err = erofs_fill_inode(inode);

		if (err) {
			iget_failed(inode);
			return ERR_PTR(err);
		}
		unlock_new_inode(inode);
	}
	return inode;
}

int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
		  struct kstat *stat, u32 request_mask,
		  unsigned int query_flags)
{
	struct inode *const inode = d_inode(path->dentry);

	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
		stat->attributes |= STATX_ATTR_COMPRESSED;

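	/* EROFS is read-only, so every inode is reported as immutable */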
	stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE);

	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}

const struct inode_operations erofs_generic_iops = {
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
	.fiemap = erofs_fiemap,
};

const struct inode_operations erofs_symlink_iops = {
	.get_link = page_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};

const struct inode_operations erofs_fast_symlink_iops = {
	.get_link = simple_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};