// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "xattr.h"

#include <trace/events/erofs.h>

/* no locking */
static int erofs_read_inode(struct inode *inode, void *data)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_compact *dic = data;
	struct erofs_inode_extended *die;

	const unsigned int ifmt = le16_to_cpu(dic->i_format);
	struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
	erofs_blk_t nblks = 0;

	vi->datalayout = erofs_inode_datalayout(ifmt);

	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}

	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		die = data;

		vi->inode_isize = sizeof(struct erofs_inode_extended);
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(die->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));

		/* ns timestamp */
		inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
			le64_to_cpu(die->i_ctime);
		inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
			le32_to_cpu(die->i_ctime_nsec);

		inode->i_size = le64_to_cpu(die->i_size);

		/* total blocks for compressed files */
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(die->i_u.compressed_blocks);
		break;
	case EROFS_INODE_LAYOUT_COMPACT:
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(dic->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		set_nlink(inode, le16_to_cpu(dic->i_nlink));

		/* use build time for all file timestamps */
		inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
			sbi->build_time;
		inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
			sbi->build_time_nsec;

		inode->i_size = le32_to_cpu(dic->i_size);
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(dic->i_u.compressed_blocks);
		break;
	default:
		erofs_err(inode->i_sb,
			  "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}

	if (!nblks)
		/* measure inode.i_blocks as generic filesystems do */
		inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
	else
		inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
	return 0;

bogusimode:
	erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
		  inode->i_mode, vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}

static int erofs_fill_symlink(struct inode *inode, void *data,
			      unsigned int m_pofs)
{
	struct erofs_inode *vi = EROFS_I(inode);
	char *lnk;

	/* if it cannot be handled with the fast symlink scheme */
	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
	    inode->i_size >= PAGE_SIZE) {
		inode->i_op = &erofs_symlink_iops;
		return 0;
	}

	lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
	if (!lnk)
		return -ENOMEM;

	m_pofs += vi->inode_isize + vi->xattr_isize;
	/* inline symlink data shouldn't cross a page boundary either */
	if (m_pofs + inode->i_size > PAGE_SIZE) {
		kfree(lnk);
		erofs_err(inode->i_sb,
			  "inline data cross block boundary @ nid %llu",
			  vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	memcpy(lnk, data + m_pofs, inode->i_size);
	lnk[inode->i_size] = '\0';

	inode->i_link = lnk;
	inode->i_op = &erofs_fast_symlink_iops;
	return 0;
}

static int erofs_fill_inode(struct inode *inode, int isdir)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct page *page;
	void *data;
	int err;
	erofs_blk_t blkaddr;
	unsigned int ofs;
	erofs_off_t inode_loc;

	trace_erofs_fill_inode(inode, isdir);
	inode_loc = iloc(EROFS_SB(sb), vi->nid);
	blkaddr = erofs_blknr(inode_loc);
	ofs = erofs_blkoff(inode_loc);

	erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
		  __func__, vi->nid, ofs, blkaddr);

	page = erofs_get_meta_page(sb, blkaddr);

	if (IS_ERR(page)) {
		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
			  vi->nid, PTR_ERR(page));
		return PTR_ERR(page);
	}

	DBG_BUGON(!PageUptodate(page));
	data = page_address(page);

	err = erofs_read_inode(inode, data + ofs);
	if (err)
		goto out_unlock;

	/* set up the new inode */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		inode->i_fop = &generic_ro_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		break;
	case S_IFLNK:
		err = erofs_fill_symlink(inode, data, ofs);
		if (err)
			goto out_unlock;
		inode_nohighmem(inode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		goto out_unlock;
	default:
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

	if (erofs_inode_is_data_compressed(vi->datalayout)) {
		err = z_erofs_fill_inode(inode);
		goto out_unlock;
	}
	inode->i_mapping->a_ops = &erofs_raw_access_aops;

out_unlock:
	unlock_page(page);
	put_page(page);
	return err;
}

/*
 * erofs nid is 64 bits, but i_ino is 'unsigned long', therefore
 * we need to do more on 32-bit platforms to find the right inode.
 */
static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	return EROFS_I(inode)->nid == nid;
}

static int erofs_iget_set_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	inode->i_ino = erofs_inode_hash(nid);
	return 0;
}

static inline struct inode *erofs_iget_locked(struct super_block *sb,
					      erofs_nid_t nid)
{
	const unsigned long hashval = erofs_inode_hash(nid);

	return iget5_locked(sb, hashval, erofs_ilookup_test_actor,
		erofs_iget_set_actor, &nid);
}

struct inode *erofs_iget(struct super_block *sb,
			 erofs_nid_t nid,
			 bool isdir)
{
	struct inode *inode = erofs_iget_locked(sb, nid);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		int err;
		struct erofs_inode *vi = EROFS_I(inode);

		vi->nid = nid;

		err = erofs_fill_inode(inode, isdir);
		if (!err)
			unlock_new_inode(inode);
		else {
			iget_failed(inode);
			inode = ERR_PTR(err);
		}
	}
	return inode;
}

int erofs_getattr(const struct path *path, struct kstat *stat,
		  u32 request_mask, unsigned int query_flags)
{
	struct inode *const inode = d_inode(path->dentry);

	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE);

	generic_fillattr(inode, stat);
	return 0;
}

const struct inode_operations erofs_generic_iops = {
	.getattr = erofs_getattr,
#ifdef CONFIG_EROFS_FS_XATTR
	.listxattr = erofs_listxattr,
#endif
	.get_acl = erofs_get_acl,
};

const struct inode_operations erofs_symlink_iops = {
	.get_link = page_get_link,
	.getattr = erofs_getattr,
#ifdef CONFIG_EROFS_FS_XATTR
	.listxattr = erofs_listxattr,
#endif
	.get_acl = erofs_get_acl,
};

const struct inode_operations erofs_fast_symlink_iops = {
	.get_link = simple_get_link,
	.getattr = erofs_getattr,
#ifdef CONFIG_EROFS_FS_XATTR
	.listxattr = erofs_listxattr,
#endif
	.get_acl = erofs_get_acl,
};