xref: /linux/fs/f2fs/inode.c (revision d8441523f21375b11a4593a2d89942b407bcb44f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/inode.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/writeback.h>
11 #include <linux/sched/mm.h>
12 #include <linux/lz4.h>
13 #include <linux/zstd.h>
14 
15 #include "f2fs.h"
16 #include "node.h"
17 #include "segment.h"
18 #include "xattr.h"
19 
20 #include <trace/events/f2fs.h>
21 
22 #ifdef CONFIG_F2FS_FS_COMPRESSION
23 extern const struct address_space_operations f2fs_compress_aops;
24 #endif
25 
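/*
 * Propagate f2fs's dirty state to the VFS.  Skip inodes that are still
 * being created, live on a read-only filesystem, or were already on the
 * f2fs dirty-inode list; atomic files are only marked once their update
 * has been committed (FI_ATOMIC_COMMITTED).
 */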
26 void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
27 {
28 	if (is_inode_flag_set(inode, FI_NEW_INODE))
29 		return;
30 
31 	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
32 		return;
33 
34 	if (f2fs_inode_dirtied(inode, sync))
35 		return;
36 
37 	/* only an atomic file with FI_ATOMIC_COMMITTED set can be marked VFS dirty */
38 	if (f2fs_is_atomic_file(inode) &&
39 			!is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
40 		return;
41 
42 	mark_inode_dirty_sync(inode);
43 }
44 
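/* Translate the on-disk F2FS_*_FL flags into the generic VFS S_* flags. */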
45 void f2fs_set_inode_flags(struct inode *inode)
46 {
47 	unsigned int flags = F2FS_I(inode)->i_flags;
48 	unsigned int new_fl = 0;
49 
50 	if (flags & F2FS_SYNC_FL)
51 		new_fl |= S_SYNC;
52 	if (flags & F2FS_APPEND_FL)
53 		new_fl |= S_APPEND;
54 	if (flags & F2FS_IMMUTABLE_FL)
55 		new_fl |= S_IMMUTABLE;
56 	if (flags & F2FS_NOATIME_FL)
57 		new_fl |= S_NOATIME;
58 	if (flags & F2FS_DIRSYNC_FL)
59 		new_fl |= S_DIRSYNC;
60 	if (file_is_encrypt(inode))
61 		new_fl |= S_ENCRYPTED;
62 	if (file_is_verity(inode))
63 		new_fl |= S_VERITY;
64 	if (flags & F2FS_CASEFOLD_FL)
65 		new_fl |= S_CASEFOLD;
66 	inode_set_flags(inode, new_fl,
67 			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
68 			S_ENCRYPTED|S_VERITY|S_CASEFOLD);
69 }
70 
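/*
 * Special inodes keep their device number in the first data block slots
 * of the raw inode: addr[0] carries the old 16-bit encoding, addr[1] the
 * new 32-bit encoding.
 */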
71 static void __get_inode_rdev(struct inode *inode, struct folio *node_folio)
72 {
73 	__le32 *addr = get_dnode_addr(inode, node_folio);
74 
75 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
76 			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
77 		if (addr[0])
78 			inode->i_rdev = old_decode_dev(le32_to_cpu(addr[0]));
79 		else
80 			inode->i_rdev = new_decode_dev(le32_to_cpu(addr[1]));
81 	}
82 }
83 
84 static void __set_inode_rdev(struct inode *inode, struct folio *node_folio)
85 {
86 	__le32 *addr = get_dnode_addr(inode, node_folio);
87 
88 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
89 		if (old_valid_dev(inode->i_rdev)) {
90 			addr[0] = cpu_to_le32(old_encode_dev(inode->i_rdev));
91 			addr[1] = 0;
92 		} else {
93 			addr[0] = 0;
94 			addr[1] = cpu_to_le32(new_encode_dev(inode->i_rdev));
95 			addr[2] = 0;
96 		}
97 	}
98 }
99 
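/*
 * A power cut may leave inline data on disk without FI_DATA_EXIST set.
 * If any inline block content is found, restore the flag in both the
 * in-memory inode and the raw inode, and redirty the node folio.
 */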
100 static void __recover_inline_status(struct inode *inode, struct folio *ifolio)
101 {
102 	void *inline_data = inline_data_addr(inode, ifolio);
103 	__le32 *start = inline_data;
104 	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);
105 
106 	while (start < end) {
107 		if (*start++) {
108 			f2fs_folio_wait_writeback(ifolio, NODE, true, true);
109 
110 			set_inode_flag(inode, FI_DATA_EXIST);
111 			set_raw_inline(inode, F2FS_INODE(&ifolio->page));
112 			folio_mark_dirty(ifolio);
113 			return;
114 		}
115 	}
116 	return;
117 }
118 
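/*
 * An inode checksum is maintained only when the feature is enabled and
 * the extra attribute area is large enough to hold i_inode_checksum.
 */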
119 static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
120 {
121 	struct f2fs_inode *ri = &F2FS_NODE(page)->i;
122 
123 	if (!f2fs_sb_has_inode_chksum(sbi))
124 		return false;
125 
126 	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
127 		return false;
128 
129 	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
130 				i_inode_checksum))
131 		return false;
132 
133 	return true;
134 }
135 
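/*
 * Checksum the raw inode block, seeded with the inode number and
 * generation, while treating the i_inode_checksum field itself as zero.
 */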
136 static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
137 {
138 	struct f2fs_node *node = F2FS_NODE(page);
139 	struct f2fs_inode *ri = &node->i;
140 	__le32 ino = node->footer.ino;
141 	__le32 gen = ri->i_generation;
142 	__u32 chksum, chksum_seed;
143 	__u32 dummy_cs = 0;
144 	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
145 	unsigned int cs_size = sizeof(dummy_cs);
146 
147 	chksum = f2fs_chksum(sbi->s_chksum_seed, (__u8 *)&ino, sizeof(ino));
148 	chksum_seed = f2fs_chksum(chksum, (__u8 *)&gen, sizeof(gen));
149 
150 	chksum = f2fs_chksum(chksum_seed, (__u8 *)ri, offset);
151 	chksum = f2fs_chksum(chksum, (__u8 *)&dummy_cs, cs_size);
152 	offset += cs_size;
153 	chksum = f2fs_chksum(chksum, (__u8 *)ri + offset,
154 			     F2FS_BLKSIZE - offset);
155 	return chksum;
156 }
157 
158 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct folio *folio)
159 {
160 	struct f2fs_inode *ri;
161 	__u32 provided, calculated;
162 
163 	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
164 		return true;
165 
166 #ifdef CONFIG_F2FS_CHECK_FS
167 	if (!f2fs_enable_inode_chksum(sbi, &folio->page))
168 #else
169 	if (!f2fs_enable_inode_chksum(sbi, &folio->page) ||
170 			folio_test_dirty(folio) ||
171 			folio_test_writeback(folio))
172 #endif
173 		return true;
174 
175 	ri = &F2FS_NODE(&folio->page)->i;
176 	provided = le32_to_cpu(ri->i_inode_checksum);
177 	calculated = f2fs_inode_chksum(sbi, &folio->page);
178 
179 	if (provided != calculated)
180 		f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
181 			  folio->index, ino_of_node(&folio->page),
182 			  provided, calculated);
183 
184 	return provided == calculated;
185 }
186 
187 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
188 {
189 	struct f2fs_inode *ri = &F2FS_NODE(page)->i;
190 
191 	if (!f2fs_enable_inode_chksum(sbi, page))
192 		return;
193 
194 	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
195 }
196 
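/*
 * Validate the on-disk compression metadata (algorithm, compressed block
 * count, cluster size and per-algorithm level) before trusting it.
 */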
197 static bool sanity_check_compress_inode(struct inode *inode,
198 			struct f2fs_inode *ri)
199 {
200 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
201 	unsigned char clevel;
202 
203 	if (ri->i_compress_algorithm >= COMPRESS_MAX) {
204 		f2fs_warn(sbi,
205 			"%s: inode (ino=%lx) has unsupported compress algorithm: %u, run fsck to fix",
206 			__func__, inode->i_ino, ri->i_compress_algorithm);
207 		return false;
208 	}
209 	if (le64_to_cpu(ri->i_compr_blocks) >
210 			SECTOR_TO_BLOCK(inode->i_blocks)) {
211 		f2fs_warn(sbi,
212 			"%s: inode (ino=%lx) has inconsistent i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
213 			__func__, inode->i_ino, le64_to_cpu(ri->i_compr_blocks),
214 			SECTOR_TO_BLOCK(inode->i_blocks));
215 		return false;
216 	}
217 	if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
218 		ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
219 		f2fs_warn(sbi,
220 			"%s: inode (ino=%lx) has unsupported log cluster size: %u, run fsck to fix",
221 			__func__, inode->i_ino, ri->i_log_cluster_size);
222 		return false;
223 	}
224 
225 	clevel = le16_to_cpu(ri->i_compress_flag) >>
226 				COMPRESS_LEVEL_OFFSET;
227 	switch (ri->i_compress_algorithm) {
228 	case COMPRESS_LZO:
229 #ifdef CONFIG_F2FS_FS_LZO
230 		if (clevel)
231 			goto err_level;
232 #endif
233 		break;
234 	case COMPRESS_LZORLE:
235 #ifdef CONFIG_F2FS_FS_LZORLE
236 		if (clevel)
237 			goto err_level;
238 #endif
239 		break;
240 	case COMPRESS_LZ4:
241 #ifdef CONFIG_F2FS_FS_LZ4
242 #ifdef CONFIG_F2FS_FS_LZ4HC
243 		if (clevel &&
244 		   (clevel < LZ4HC_MIN_CLEVEL || clevel > LZ4HC_MAX_CLEVEL))
245 			goto err_level;
246 #else
247 		if (clevel)
248 			goto err_level;
249 #endif
250 #endif
251 		break;
252 	case COMPRESS_ZSTD:
253 #ifdef CONFIG_F2FS_FS_ZSTD
254 		if (clevel < zstd_min_clevel() || clevel > zstd_max_clevel())
255 			goto err_level;
256 #endif
257 		break;
258 	default:
259 		goto err_level;
260 	}
261 
262 	return true;
263 err_level:
264 	f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported compress level: %u, run fsck to fix",
265 		  __func__, inode->i_ino, clevel);
266 	return false;
267 }
268 
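/*
 * Cross-check the raw inode against enabled superblock features and
 * basic invariants, so a corrupted or crafted image cannot bring an
 * inconsistent inode into memory.
 */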
269 static bool sanity_check_inode(struct inode *inode, struct page *node_page)
270 {
271 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
272 	struct f2fs_inode_info *fi = F2FS_I(inode);
273 	struct f2fs_inode *ri = F2FS_INODE(node_page);
274 	unsigned long long iblocks;
275 
276 	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
277 	if (!iblocks) {
278 		f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
279 			  __func__, inode->i_ino, iblocks);
280 		return false;
281 	}
282 
283 	if (ino_of_node(node_page) != nid_of_node(node_page)) {
284 		f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
285 			  __func__, inode->i_ino,
286 			  ino_of_node(node_page), nid_of_node(node_page));
287 		return false;
288 	}
289 
290 	if (ino_of_node(node_page) == fi->i_xattr_nid) {
291 		f2fs_warn(sbi, "%s: corrupted inode i_ino=%lx, xnid=%x, run fsck to fix.",
292 			  __func__, inode->i_ino, fi->i_xattr_nid);
293 		return false;
294 	}
295 
296 	if (f2fs_has_extra_attr(inode)) {
297 		if (!f2fs_sb_has_extra_attr(sbi)) {
298 			f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
299 				  __func__, inode->i_ino);
300 			return false;
301 		}
302 		if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
303 			fi->i_extra_isize < F2FS_MIN_EXTRA_ATTR_SIZE ||
304 			fi->i_extra_isize % sizeof(__le32)) {
305 			f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
306 				  __func__, inode->i_ino, fi->i_extra_isize,
307 				  F2FS_TOTAL_EXTRA_ATTR_SIZE);
308 			return false;
309 		}
310 		if (f2fs_sb_has_compression(sbi) &&
311 			fi->i_flags & F2FS_COMPR_FL &&
312 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
313 						i_compress_flag)) {
314 			if (!sanity_check_compress_inode(inode, ri))
315 				return false;
316 		}
317 	}
318 
319 	if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
320 		f2fs_has_inline_xattr(inode) &&
321 		(fi->i_inline_xattr_size < MIN_INLINE_XATTR_SIZE ||
322 		fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
323 		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, min: %zu, max: %lu",
324 			  __func__, inode->i_ino, fi->i_inline_xattr_size,
325 			  MIN_INLINE_XATTR_SIZE, MAX_INLINE_XATTR_SIZE);
326 		return false;
327 	}
328 
329 	if (!f2fs_sb_has_extra_attr(sbi)) {
330 		if (f2fs_sb_has_project_quota(sbi)) {
331 			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
332 				  __func__, inode->i_ino, F2FS_FEATURE_PRJQUOTA);
333 			return false;
334 		}
335 		if (f2fs_sb_has_inode_chksum(sbi)) {
336 			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
337 				  __func__, inode->i_ino, F2FS_FEATURE_INODE_CHKSUM);
338 			return false;
339 		}
340 		if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
341 			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
342 				  __func__, inode->i_ino, F2FS_FEATURE_FLEXIBLE_INLINE_XATTR);
343 			return false;
344 		}
345 		if (f2fs_sb_has_inode_crtime(sbi)) {
346 			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
347 				  __func__, inode->i_ino, F2FS_FEATURE_INODE_CRTIME);
348 			return false;
349 		}
350 		if (f2fs_sb_has_compression(sbi)) {
351 			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
352 				  __func__, inode->i_ino, F2FS_FEATURE_COMPRESSION);
353 			return false;
354 		}
355 	}
356 
357 	if (f2fs_sanity_check_inline_data(inode, node_page)) {
358 		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
359 			  __func__, inode->i_ino, inode->i_mode);
360 		return false;
361 	}
362 
363 	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
364 		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
365 			  __func__, inode->i_ino, inode->i_mode);
366 		return false;
367 	}
368 
369 	if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
370 		f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
371 			  __func__, inode->i_ino);
372 		return false;
373 	}
374 
375 	if (fi->i_xattr_nid && f2fs_check_nid_range(sbi, fi->i_xattr_nid)) {
376 		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_xattr_nid: %u, run fsck to fix.",
377 			  __func__, inode->i_ino, fi->i_xattr_nid);
378 		return false;
379 	}
380 
381 	if (IS_DEVICE_ALIASING(inode)) {
382 		if (!f2fs_sb_has_device_alias(sbi)) {
383 			f2fs_warn(sbi, "%s: inode (ino=%lx) has device alias flag, but the feature is off",
384 				  __func__, inode->i_ino);
385 			return false;
386 		}
387 		if (!f2fs_is_pinned_file(inode)) {
388 			f2fs_warn(sbi, "%s: inode (ino=%lx) has device alias flag, but is not pinned",
389 				  __func__, inode->i_ino);
390 			return false;
391 		}
392 	}
393 
394 	return true;
395 }
396 
397 static void init_idisk_time(struct inode *inode)
398 {
399 	struct f2fs_inode_info *fi = F2FS_I(inode);
400 
401 	fi->i_disk_time[0] = inode_get_atime(inode);
402 	fi->i_disk_time[1] = inode_get_ctime(inode);
403 	fi->i_disk_time[2] = inode_get_mtime(inode);
404 }
405 
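/* Fill the VFS inode and f2fs_inode_info from the on-disk inode block. */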
406 static int do_read_inode(struct inode *inode)
407 {
408 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
409 	struct f2fs_inode_info *fi = F2FS_I(inode);
410 	struct folio *node_folio;
411 	struct f2fs_inode *ri;
412 	projid_t i_projid;
413 
414 	/* Check if ino is within scope */
415 	if (f2fs_check_nid_range(sbi, inode->i_ino))
416 		return -EINVAL;
417 
418 	node_folio = f2fs_get_inode_folio(sbi, inode->i_ino);
419 	if (IS_ERR(node_folio))
420 		return PTR_ERR(node_folio);
421 
422 	ri = F2FS_INODE(&node_folio->page);
423 
424 	inode->i_mode = le16_to_cpu(ri->i_mode);
425 	i_uid_write(inode, le32_to_cpu(ri->i_uid));
426 	i_gid_write(inode, le32_to_cpu(ri->i_gid));
427 	set_nlink(inode, le32_to_cpu(ri->i_links));
428 	inode->i_size = le64_to_cpu(ri->i_size);
429 	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);
430 
431 	inode_set_atime(inode, le64_to_cpu(ri->i_atime),
432 			le32_to_cpu(ri->i_atime_nsec));
433 	inode_set_ctime(inode, le64_to_cpu(ri->i_ctime),
434 			le32_to_cpu(ri->i_ctime_nsec));
435 	inode_set_mtime(inode, le64_to_cpu(ri->i_mtime),
436 			le32_to_cpu(ri->i_mtime_nsec));
437 	inode->i_generation = le32_to_cpu(ri->i_generation);
438 	if (S_ISDIR(inode->i_mode))
439 		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
440 	else if (S_ISREG(inode->i_mode))
441 		fi->i_gc_failures = le16_to_cpu(ri->i_gc_failures);
442 	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
443 	fi->i_flags = le32_to_cpu(ri->i_flags);
444 	if (S_ISREG(inode->i_mode))
445 		fi->i_flags &= ~F2FS_PROJINHERIT_FL;
446 	bitmap_zero(fi->flags, FI_MAX);
447 	fi->i_advise = ri->i_advise;
448 	fi->i_pino = le32_to_cpu(ri->i_pino);
449 	fi->i_dir_level = ri->i_dir_level;
450 
451 	get_inline_info(inode, ri);
452 
453 	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
454 					le16_to_cpu(ri->i_extra_isize) : 0;
455 
456 	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
457 		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
458 	} else if (f2fs_has_inline_xattr(inode) ||
459 				f2fs_has_inline_dentry(inode)) {
460 		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
461 	} else {
462 
463 		/*
464 		 * Previous inline data or directory always reserved 200 bytes
465 		 * in inode layout, even if inline_xattr is disabled. In order
466 		 * to keep inline_dentry's structure for backward compatibility,
467 		 * we get the space back only from inline_data.
468 		 */
469 		fi->i_inline_xattr_size = 0;
470 	}
471 
472 	if (!sanity_check_inode(inode, &node_folio->page)) {
473 		f2fs_folio_put(node_folio, true);
474 		set_sbi_flag(sbi, SBI_NEED_FSCK);
475 		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
476 		return -EFSCORRUPTED;
477 	}
478 
479 	/* check data exist */
480 	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
481 		__recover_inline_status(inode, node_folio);
482 
483 	/* try to recover cold bit for non-dir inode */
484 	if (!S_ISDIR(inode->i_mode) && !is_cold_node(&node_folio->page)) {
485 		f2fs_folio_wait_writeback(node_folio, NODE, true, true);
486 		set_cold_node(&node_folio->page, false);
487 		folio_mark_dirty(node_folio);
488 	}
489 
490 	/* get rdev by using inline_info */
491 	__get_inode_rdev(inode, node_folio);
492 
493 	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
494 		fi->last_disk_size = inode->i_size;
495 
496 	if (fi->i_flags & F2FS_PROJINHERIT_FL)
497 		set_inode_flag(inode, FI_PROJ_INHERIT);
498 
499 	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
500 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
501 		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
502 	else
503 		i_projid = F2FS_DEF_PROJID;
504 	fi->i_projid = make_kprojid(&init_user_ns, i_projid);
505 
506 	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
507 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
508 		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
509 		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
510 	}
511 
512 	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
513 					(fi->i_flags & F2FS_COMPR_FL)) {
514 		if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
515 					i_compress_flag)) {
516 			unsigned short compress_flag;
517 
518 			atomic_set(&fi->i_compr_blocks,
519 					le64_to_cpu(ri->i_compr_blocks));
520 			fi->i_compress_algorithm = ri->i_compress_algorithm;
521 			fi->i_log_cluster_size = ri->i_log_cluster_size;
522 			compress_flag = le16_to_cpu(ri->i_compress_flag);
523 			fi->i_compress_level = compress_flag >>
524 						COMPRESS_LEVEL_OFFSET;
525 			fi->i_compress_flag = compress_flag &
526 					GENMASK(COMPRESS_LEVEL_OFFSET - 1, 0);
527 			fi->i_cluster_size = BIT(fi->i_log_cluster_size);
528 			set_inode_flag(inode, FI_COMPRESSED_FILE);
529 		}
530 	}
531 
532 	init_idisk_time(inode);
533 
534 	if (!sanity_check_extent_cache(inode, &node_folio->page)) {
535 		f2fs_folio_put(node_folio, true);
536 		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
537 		return -EFSCORRUPTED;
538 	}
539 
540 	/* Need all the flag bits */
541 	f2fs_init_read_extent_tree(inode, node_folio);
542 	f2fs_init_age_extent_tree(inode);
543 
544 	f2fs_folio_put(node_folio, true);
545 
546 	stat_inc_inline_xattr(inode);
547 	stat_inc_inline_inode(inode);
548 	stat_inc_inline_dir(inode);
549 	stat_inc_compr_inode(inode);
550 	stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks));
551 
552 	return 0;
553 }
554 
555 static bool is_meta_ino(struct f2fs_sb_info *sbi, unsigned int ino)
556 {
557 	return ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi) ||
558 		ino == F2FS_COMPRESS_INO(sbi);
559 }
560 
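/*
 * Look up or instantiate an inode.  Meta inodes (node, meta, compress)
 * bypass do_read_inode(); everything else is read from disk and wired
 * to the operations matching its file type.
 */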
561 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
562 {
563 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
564 	struct inode *inode;
565 	int ret = 0;
566 
567 	inode = iget_locked(sb, ino);
568 	if (!inode)
569 		return ERR_PTR(-ENOMEM);
570 
571 	if (!(inode->i_state & I_NEW)) {
572 		if (is_meta_ino(sbi, ino)) {
573 			f2fs_err(sbi, "inaccessible inode: %lu, run fsck to repair", ino);
574 			set_sbi_flag(sbi, SBI_NEED_FSCK);
575 			ret = -EFSCORRUPTED;
576 			trace_f2fs_iget_exit(inode, ret);
577 			iput(inode);
578 			f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
579 			return ERR_PTR(ret);
580 		}
581 
582 		trace_f2fs_iget(inode);
583 		return inode;
584 	}
585 
586 	if (is_meta_ino(sbi, ino))
587 		goto make_now;
588 
589 	ret = do_read_inode(inode);
590 	if (ret)
591 		goto bad_inode;
592 make_now:
593 	if (ino == F2FS_NODE_INO(sbi)) {
594 		inode->i_mapping->a_ops = &f2fs_node_aops;
595 		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
596 	} else if (ino == F2FS_META_INO(sbi)) {
597 		inode->i_mapping->a_ops = &f2fs_meta_aops;
598 		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
599 	} else if (ino == F2FS_COMPRESS_INO(sbi)) {
600 #ifdef CONFIG_F2FS_FS_COMPRESSION
601 		inode->i_mapping->a_ops = &f2fs_compress_aops;
602 		/*
603 		 * generic_error_remove_folio only truncates pages of regular
604 		 * inode
605 		 */
606 		inode->i_mode |= S_IFREG;
607 #endif
608 		mapping_set_gfp_mask(inode->i_mapping,
609 			GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
610 	} else if (S_ISREG(inode->i_mode)) {
611 		inode->i_op = &f2fs_file_inode_operations;
612 		inode->i_fop = &f2fs_file_operations;
613 		inode->i_mapping->a_ops = &f2fs_dblock_aops;
614 	} else if (S_ISDIR(inode->i_mode)) {
615 		inode->i_op = &f2fs_dir_inode_operations;
616 		inode->i_fop = &f2fs_dir_operations;
617 		inode->i_mapping->a_ops = &f2fs_dblock_aops;
618 		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
619 	} else if (S_ISLNK(inode->i_mode)) {
620 		if (file_is_encrypt(inode))
621 			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
622 		else
623 			inode->i_op = &f2fs_symlink_inode_operations;
624 		inode_nohighmem(inode);
625 		inode->i_mapping->a_ops = &f2fs_dblock_aops;
626 	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
627 			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
628 		inode->i_op = &f2fs_special_inode_operations;
629 		init_special_inode(inode, inode->i_mode, inode->i_rdev);
630 	} else {
631 		ret = -EIO;
632 		goto bad_inode;
633 	}
634 	f2fs_set_inode_flags(inode);
635 
636 	unlock_new_inode(inode);
637 	trace_f2fs_iget(inode);
638 	return inode;
639 
640 bad_inode:
641 	f2fs_inode_synced(inode);
642 	iget_failed(inode);
643 	trace_f2fs_iget_exit(inode, ret);
644 	return ERR_PTR(ret);
645 }
646 
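/* Same as f2fs_iget(), but keep retrying while memory allocation fails. */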
647 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
648 {
649 	struct inode *inode;
650 retry:
651 	inode = f2fs_iget(sb, ino);
652 	if (IS_ERR(inode)) {
653 		if (PTR_ERR(inode) == -ENOMEM) {
654 			memalloc_retry_wait(GFP_NOFS);
655 			goto retry;
656 		}
657 	}
658 	return inode;
659 }
660 
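/*
 * Copy the in-memory inode state back into the raw inode and mark the
 * node folio dirty so the update reaches disk at the next writeback.
 */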
661 void f2fs_update_inode(struct inode *inode, struct folio *node_folio)
662 {
663 	struct f2fs_inode_info *fi = F2FS_I(inode);
664 	struct f2fs_inode *ri;
665 	struct extent_tree *et = fi->extent_tree[EX_READ];
666 
667 	f2fs_folio_wait_writeback(node_folio, NODE, true, true);
668 	folio_mark_dirty(node_folio);
669 
670 	f2fs_inode_synced(inode);
671 
672 	ri = F2FS_INODE(&node_folio->page);
673 
674 	ri->i_mode = cpu_to_le16(inode->i_mode);
675 	ri->i_advise = fi->i_advise;
676 	ri->i_uid = cpu_to_le32(i_uid_read(inode));
677 	ri->i_gid = cpu_to_le32(i_gid_read(inode));
678 	ri->i_links = cpu_to_le32(inode->i_nlink);
679 	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);
680 
681 	if (!f2fs_is_atomic_file(inode) ||
682 			is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
683 		ri->i_size = cpu_to_le64(i_size_read(inode));
684 
685 	if (et) {
686 		read_lock(&et->lock);
687 		set_raw_read_extent(&et->largest, &ri->i_ext);
688 		read_unlock(&et->lock);
689 	} else {
690 		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
691 	}
692 	set_raw_inline(inode, ri);
693 
694 	ri->i_atime = cpu_to_le64(inode_get_atime_sec(inode));
695 	ri->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
696 	ri->i_mtime = cpu_to_le64(inode_get_mtime_sec(inode));
697 	ri->i_atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode));
698 	ri->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
699 	ri->i_mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
700 	if (S_ISDIR(inode->i_mode))
701 		ri->i_current_depth = cpu_to_le32(fi->i_current_depth);
702 	else if (S_ISREG(inode->i_mode))
703 		ri->i_gc_failures = cpu_to_le16(fi->i_gc_failures);
704 	ri->i_xattr_nid = cpu_to_le32(fi->i_xattr_nid);
705 	ri->i_flags = cpu_to_le32(fi->i_flags);
706 	ri->i_pino = cpu_to_le32(fi->i_pino);
707 	ri->i_generation = cpu_to_le32(inode->i_generation);
708 	ri->i_dir_level = fi->i_dir_level;
709 
710 	if (f2fs_has_extra_attr(inode)) {
711 		ri->i_extra_isize = cpu_to_le16(fi->i_extra_isize);
712 
713 		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
714 			ri->i_inline_xattr_size =
715 				cpu_to_le16(fi->i_inline_xattr_size);
716 
717 		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
718 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid)) {
719 			projid_t i_projid;
720 
721 			i_projid = from_kprojid(&init_user_ns, fi->i_projid);
722 			ri->i_projid = cpu_to_le32(i_projid);
723 		}
724 
725 		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
726 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
727 			ri->i_crtime = cpu_to_le64(fi->i_crtime.tv_sec);
728 			ri->i_crtime_nsec = cpu_to_le32(fi->i_crtime.tv_nsec);
729 		}
730 
731 		if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
732 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
733 							i_compress_flag)) {
734 			unsigned short compress_flag;
735 
736 			ri->i_compr_blocks = cpu_to_le64(
737 					atomic_read(&fi->i_compr_blocks));
738 			ri->i_compress_algorithm = fi->i_compress_algorithm;
739 			compress_flag = fi->i_compress_flag |
740 						fi->i_compress_level <<
741 						COMPRESS_LEVEL_OFFSET;
742 			ri->i_compress_flag = cpu_to_le16(compress_flag);
743 			ri->i_log_cluster_size = fi->i_log_cluster_size;
744 		}
745 	}
746 
747 	__set_inode_rdev(inode, node_folio);
748 
749 	/* deleted inode */
750 	if (inode->i_nlink == 0)
751 		clear_page_private_inline(&node_folio->page);
752 
753 	init_idisk_time(inode);
754 #ifdef CONFIG_F2FS_CHECK_FS
755 	f2fs_inode_chksum_set(F2FS_I_SB(inode), &node_folio->page);
756 #endif
757 }
758 
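/*
 * Fetch the inode's node folio and sync the inode into it, retrying
 * transient failures; unrecoverable errors stop checkpointing.
 */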
759 void f2fs_update_inode_page(struct inode *inode)
760 {
761 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
762 	struct folio *node_folio;
763 	int count = 0;
764 retry:
765 	node_folio = f2fs_get_inode_folio(sbi, inode->i_ino);
766 	if (IS_ERR(node_folio)) {
767 		int err = PTR_ERR(node_folio);
768 
769 		/* The node block was truncated. */
770 		if (err == -ENOENT)
771 			return;
772 
773 		if (err == -EFSCORRUPTED)
774 			goto stop_checkpoint;
775 
776 		if (err == -ENOMEM || ++count <= DEFAULT_RETRY_IO_COUNT)
777 			goto retry;
778 stop_checkpoint:
779 		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_UPDATE_INODE);
780 		return;
781 	}
782 	f2fs_update_inode(inode, node_folio);
783 	f2fs_folio_put(node_folio, true);
784 }
785 
786 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
787 {
788 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
789 
790 	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
791 			inode->i_ino == F2FS_META_INO(sbi))
792 		return 0;
793 
794 	/*
795 	 * atime could be updated without dirtying f2fs inode in lazytime mode
796 	 */
797 	if (f2fs_is_time_consistent(inode) &&
798 		!is_inode_flag_set(inode, FI_DIRTY_INODE))
799 		return 0;
800 
801 	/*
802 	 * no need to update the inode page; ultimately f2fs_evict_inode() will
803 	 * clear the dirty status of the inode.
804 	 */
805 	if (f2fs_cp_error(sbi))
806 		return -EIO;
807 
808 	if (!f2fs_is_checkpoint_ready(sbi)) {
809 		f2fs_mark_inode_dirty_sync(inode, true);
810 		return -ENOSPC;
811 	}
812 
813 	/*
814 	 * We need to balance the fs here to prevent producing dirty node pages
815 	 * during urgent cleaning when running out of free sections.
816 	 */
817 	f2fs_update_inode_page(inode);
818 	if (wbc && wbc->nr_to_write)
819 		f2fs_balance_fs(sbi, true);
820 	return 0;
821 }
822 
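/* Unlink the inode from the per-sb donation list and update the count. */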
823 static void f2fs_remove_donate_inode(struct inode *inode)
824 {
825 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
826 
827 	if (list_empty(&F2FS_I(inode)->gdonate_list))
828 		return;
829 
830 	spin_lock(&sbi->inode_lock[DONATE_INODE]);
831 	list_del_init(&F2FS_I(inode)->gdonate_list);
832 	sbi->donate_files--;
833 	spin_unlock(&sbi->inode_lock[DONATE_INODE]);
834 }
835 
836 /*
837  * Called at the last iput() if i_nlink is zero
838  */
839 void f2fs_evict_inode(struct inode *inode)
840 {
841 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
842 	struct f2fs_inode_info *fi = F2FS_I(inode);
843 	nid_t xnid = fi->i_xattr_nid;
844 	int err = 0;
845 	bool freeze_protected = false;
846 
847 	f2fs_abort_atomic_write(inode, true);
848 
849 	if (fi->cow_inode && f2fs_is_cow_file(fi->cow_inode)) {
850 		clear_inode_flag(fi->cow_inode, FI_COW_FILE);
851 		F2FS_I(fi->cow_inode)->atomic_inode = NULL;
852 		iput(fi->cow_inode);
853 		fi->cow_inode = NULL;
854 	}
855 
856 	trace_f2fs_evict_inode(inode);
857 	truncate_inode_pages_final(&inode->i_data);
858 
859 	if ((inode->i_nlink || is_bad_inode(inode)) &&
860 		test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
861 		f2fs_invalidate_compress_pages(sbi, inode->i_ino);
862 
863 	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
864 			inode->i_ino == F2FS_META_INO(sbi) ||
865 			inode->i_ino == F2FS_COMPRESS_INO(sbi))
866 		goto out_clear;
867 
868 	f2fs_bug_on(sbi, get_dirty_pages(inode));
869 	f2fs_remove_dirty_inode(inode);
870 	f2fs_remove_donate_inode(inode);
871 
872 	if (!IS_DEVICE_ALIASING(inode))
873 		f2fs_destroy_extent_tree(inode);
874 
875 	if (inode->i_nlink || is_bad_inode(inode))
876 		goto no_delete;
877 
878 	err = f2fs_dquot_initialize(inode);
879 	if (err) {
880 		err = 0;
881 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
882 	}
883 
884 	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
885 	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
886 	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);
887 
888 	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING)) {
889 		sb_start_intwrite(inode->i_sb);
890 		freeze_protected = true;
891 	}
892 	set_inode_flag(inode, FI_NO_ALLOC);
893 	i_size_write(inode, 0);
894 retry:
895 	if (F2FS_HAS_BLOCKS(inode))
896 		err = f2fs_truncate(inode);
897 
898 	if (time_to_inject(sbi, FAULT_EVICT_INODE))
899 		err = -EIO;
900 
901 	if (!err) {
902 		f2fs_lock_op(sbi);
903 		err = f2fs_remove_inode_page(inode);
904 		f2fs_unlock_op(sbi);
905 		if (err == -ENOENT) {
906 			err = 0;
907 
908 			/*
909 			 * in a fuzzed image, another node may have the same
910 			 * block address as this inode's; if it was truncated
911 			 * previously, truncation of the inode node will fail.
912 			 */
913 			if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
914 				f2fs_warn(F2FS_I_SB(inode),
915 					"f2fs_evict_inode: inconsistent node id, ino:%lu",
916 					inode->i_ino);
917 				f2fs_inode_synced(inode);
918 				set_sbi_flag(sbi, SBI_NEED_FSCK);
919 			}
920 		}
921 	}
922 
923 	/* give more chances, if ENOMEM case */
924 	if (err == -ENOMEM) {
925 		err = 0;
926 		goto retry;
927 	}
928 
929 	if (IS_DEVICE_ALIASING(inode))
930 		f2fs_destroy_extent_tree(inode);
931 
932 	if (err) {
933 		f2fs_update_inode_page(inode);
934 		if (dquot_initialize_needed(inode))
935 			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
936 	}
937 	if (freeze_protected)
938 		sb_end_intwrite(inode->i_sb);
939 no_delete:
940 	dquot_drop(inode);
941 
942 	stat_dec_inline_xattr(inode);
943 	stat_dec_inline_dir(inode);
944 	stat_dec_inline_inode(inode);
945 	stat_dec_compr_inode(inode);
946 	stat_sub_compr_blocks(inode,
947 			atomic_read(&fi->i_compr_blocks));
948 
949 	if (likely(!f2fs_cp_error(sbi) &&
950 				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
951 		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
952 	else
953 		f2fs_inode_synced(inode);
954 
955 	/* in case f2fs_new_inode() failed, i_ino is zero; skip it */
956 	if (inode->i_ino)
957 		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
958 							inode->i_ino);
959 	if (xnid)
960 		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
961 	if (inode->i_nlink) {
962 		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
963 			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
964 		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
965 			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
966 	}
967 	if (is_inode_flag_set(inode, FI_FREE_NID)) {
968 		f2fs_alloc_nid_failed(sbi, inode->i_ino);
969 		clear_inode_flag(inode, FI_FREE_NID);
970 	} else {
971 		/*
972 		 * If the xattr nid is corrupted, we can reach the error condition
973 		 * err && !f2fs_exist_written_data(sbi, inode->i_ino, ORPHAN_INO).
974 		 * In that case, f2fs_check_nid_range() is enough to give a clue.
975 		 */
976 	}
977 out_clear:
978 	fscrypt_put_encryption_info(inode);
979 	fsverity_cleanup_inode(inode);
980 	clear_inode(inode);
981 }
982 
983 /* caller should call f2fs_lock_op() */
984 void f2fs_handle_failed_inode(struct inode *inode)
985 {
986 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
987 	struct node_info ni;
988 	int err;
989 
990 	/*
991 	 * clear nlink of the inode in order to release its resources
992 	 * immediately.
993 	 */
994 	clear_nlink(inode);
995 
996 	/*
997 	 * we must call this to avoid inode being remained as dirty, resulting
998 	 * in a panic when flushing dirty inodes in gdirty_list.
999 	 */
1000 	f2fs_update_inode_page(inode);
1001 	f2fs_inode_synced(inode);
1002 
1003 	/* don't make this a bad inode, since it becomes a regular file. */
1004 	unlock_new_inode(inode);
1005 
1006 	/*
1007 	 * Note: we should add inode to orphan list before f2fs_unlock_op()
1008 	 * so we can prevent losing this orphan when encountering a checkpoint
1009 	 * followed by a sudden power-off.
1010 	 */
1011 	err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
1012 	if (err) {
1013 		set_sbi_flag(sbi, SBI_NEED_FSCK);
1014 		set_inode_flag(inode, FI_FREE_NID);
1015 		f2fs_warn(sbi, "May loss orphan inode, run fsck to fix.");
1016 		goto out;
1017 	}
1018 
1019 	if (ni.blk_addr != NULL_ADDR) {
1020 		err = f2fs_acquire_orphan_inode(sbi);
1021 		if (err) {
1022 			set_sbi_flag(sbi, SBI_NEED_FSCK);
1023 			f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
1024 		} else {
1025 			f2fs_add_orphan_inode(inode);
1026 		}
1027 		f2fs_alloc_nid_done(sbi, inode->i_ino);
1028 	} else {
1029 		set_inode_flag(inode, FI_FREE_NID);
1030 	}
1031 
1032 out:
1033 	f2fs_unlock_op(sbi);
1034 
1035 	/* iput will drop the inode object */
1036 	iput(inode);
1037 }
1038