1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/inode.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/buffer_head.h>
11 #include <linux/backing-dev.h>
12 #include <linux/writeback.h>
13 
14 #include "f2fs.h"
15 #include "node.h"
16 #include "segment.h"
17 #include "xattr.h"
18 
19 #include <trace/events/f2fs.h>
20 
21 void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
22 {
23 	if (is_inode_flag_set(inode, FI_NEW_INODE))
24 		return;
25 
26 	if (f2fs_inode_dirtied(inode, sync))
27 		return;
28 
29 	mark_inode_dirty_sync(inode);
30 }
31 
32 void f2fs_set_inode_flags(struct inode *inode)
33 {
34 	unsigned int flags = F2FS_I(inode)->i_flags;
35 	unsigned int new_fl = 0;
36 
37 	if (flags & F2FS_SYNC_FL)
38 		new_fl |= S_SYNC;
39 	if (flags & F2FS_APPEND_FL)
40 		new_fl |= S_APPEND;
41 	if (flags & F2FS_IMMUTABLE_FL)
42 		new_fl |= S_IMMUTABLE;
43 	if (flags & F2FS_NOATIME_FL)
44 		new_fl |= S_NOATIME;
45 	if (flags & F2FS_DIRSYNC_FL)
46 		new_fl |= S_DIRSYNC;
47 	if (file_is_encrypt(inode))
48 		new_fl |= S_ENCRYPTED;
49 	if (file_is_verity(inode))
50 		new_fl |= S_VERITY;
51 	if (flags & F2FS_CASEFOLD_FL)
52 		new_fl |= S_CASEFOLD;
53 	inode_set_flags(inode, new_fl,
54 			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
55 			S_ENCRYPTED|S_VERITY|S_CASEFOLD);
56 }
57 
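/*
 * Device numbers of special inodes are kept in the first data address
 * slots of the raw inode: a non-zero i_addr[extra_size] carries the old
 * 16-bit encoding, otherwise the new 32-bit encoding is read from
 * i_addr[extra_size + 1] (see __set_inode_rdev() for the write side).
 */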
58 static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
59 {
60 	int extra_size = get_extra_isize(inode);
61 
62 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
63 			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
64 		if (ri->i_addr[extra_size])
65 			inode->i_rdev = old_decode_dev(
66 				le32_to_cpu(ri->i_addr[extra_size]));
67 		else
68 			inode->i_rdev = new_decode_dev(
69 				le32_to_cpu(ri->i_addr[extra_size + 1]));
70 	}
71 }
72 
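/*
 * Check the address of the inode's first data block: returns 0 when it is
 * a valid, in-range block address (i.e. it has already been written), 1
 * when no on-disk block has been allocated for it yet, and -EFSCORRUPTED
 * when the recorded address falls outside the valid range.
 */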
73 static int __written_first_block(struct f2fs_sb_info *sbi,
74 					struct f2fs_inode *ri)
75 {
76 	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);
77 
78 	if (!__is_valid_data_blkaddr(addr))
79 		return 1;
80 	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE))
81 		return -EFSCORRUPTED;
82 	return 0;
83 }
84 
85 static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
86 {
87 	int extra_size = get_extra_isize(inode);
88 
89 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
90 		if (old_valid_dev(inode->i_rdev)) {
91 			ri->i_addr[extra_size] =
92 				cpu_to_le32(old_encode_dev(inode->i_rdev));
93 			ri->i_addr[extra_size + 1] = 0;
94 		} else {
95 			ri->i_addr[extra_size] = 0;
96 			ri->i_addr[extra_size + 1] =
97 				cpu_to_le32(new_encode_dev(inode->i_rdev));
98 			ri->i_addr[extra_size + 2] = 0;
99 		}
100 	}
101 }
102 
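/*
 * Scan the inline data area of @ipage: if any word is non-zero, inline
 * data does exist even though FI_DATA_EXIST was not recorded (e.g. lost
 * across a sudden power-off), so set the flag both in memory and in the
 * raw inode, and redirty the page.
 */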
103 static void __recover_inline_status(struct inode *inode, struct page *ipage)
104 {
105 	void *inline_data = inline_data_addr(inode, ipage);
106 	__le32 *start = inline_data;
107 	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);
108 
109 	while (start < end) {
110 		if (*start++) {
111 			f2fs_wait_on_page_writeback(ipage, NODE, true, true);
112 
113 			set_inode_flag(inode, FI_DATA_EXIST);
114 			set_raw_inline(inode, F2FS_INODE(ipage));
115 			set_page_dirty(ipage);
116 			return;
117 		}
118 	}
119 	return;
120 }
121 
122 static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
123 {
124 	struct f2fs_inode *ri = &F2FS_NODE(page)->i;
125 
126 	if (!f2fs_sb_has_inode_chksum(sbi))
127 		return false;
128 
129 	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
130 		return false;
131 
132 	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
133 				i_inode_checksum))
134 		return false;
135 
136 	return true;
137 }
138 
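/*
 * Compute the inode checksum: the seed mixes in the inode number and
 * generation, then the whole on-disk inode block is covered with the
 * i_inode_checksum field itself replaced by zeroes (dummy_cs), so the
 * stored value never checksums itself.
 */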
139 static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
140 {
141 	struct f2fs_node *node = F2FS_NODE(page);
142 	struct f2fs_inode *ri = &node->i;
143 	__le32 ino = node->footer.ino;
144 	__le32 gen = ri->i_generation;
145 	__u32 chksum, chksum_seed;
146 	__u32 dummy_cs = 0;
147 	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
148 	unsigned int cs_size = sizeof(dummy_cs);
149 
150 	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
151 							sizeof(ino));
152 	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));
153 
154 	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
155 	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
156 	offset += cs_size;
157 	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
158 						F2FS_BLKSIZE - offset);
159 	return chksum;
160 }
161 
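/*
 * Verify the checksum of an inode page. Verification is skipped after a
 * shutdown and, unless CONFIG_F2FS_CHECK_FS is enabled, for dirty or
 * writeback pages whose in-memory contents may no longer match the
 * checksum stored in them.
 */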
162 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
163 {
164 	struct f2fs_inode *ri;
165 	__u32 provided, calculated;
166 
167 	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
168 		return true;
169 
170 #ifdef CONFIG_F2FS_CHECK_FS
171 	if (!f2fs_enable_inode_chksum(sbi, page))
172 #else
173 	if (!f2fs_enable_inode_chksum(sbi, page) ||
174 			PageDirty(page) || PageWriteback(page))
175 #endif
176 		return true;
177 
178 	ri = &F2FS_NODE(page)->i;
179 	provided = le32_to_cpu(ri->i_inode_checksum);
180 	calculated = f2fs_inode_chksum(sbi, page);
181 
182 	if (provided != calculated)
183 		f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
184 			  page->index, ino_of_node(page), provided, calculated);
185 
186 	return provided == calculated;
187 }
188 
189 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
190 {
191 	struct f2fs_inode *ri = &F2FS_NODE(page)->i;
192 
193 	if (!f2fs_enable_inode_chksum(sbi, page))
194 		return;
195 
196 	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
197 }
198 
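/*
 * Sanity-check the on-disk inode against the enabled features; returns
 * false (setting SBI_NEED_FSCK where appropriate) if anything looks
 * corrupted, in which case the caller fails the read with -EFSCORRUPTED.
 */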
199 static bool sanity_check_inode(struct inode *inode, struct page *node_page)
200 {
201 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
202 	struct f2fs_inode_info *fi = F2FS_I(inode);
203 	struct f2fs_inode *ri = F2FS_INODE(node_page);
204 	unsigned long long iblocks;
205 
206 	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
207 	if (!iblocks) {
208 		set_sbi_flag(sbi, SBI_NEED_FSCK);
209 		f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
210 			  __func__, inode->i_ino, iblocks);
211 		return false;
212 	}
213 
214 	if (ino_of_node(node_page) != nid_of_node(node_page)) {
215 		set_sbi_flag(sbi, SBI_NEED_FSCK);
216 		f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
217 			  __func__, inode->i_ino,
218 			  ino_of_node(node_page), nid_of_node(node_page));
219 		return false;
220 	}
221 
222 	if (f2fs_sb_has_flexible_inline_xattr(sbi)
223 			&& !f2fs_has_extra_attr(inode)) {
224 		set_sbi_flag(sbi, SBI_NEED_FSCK);
225 		f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
226 			  __func__, inode->i_ino);
227 		return false;
228 	}
229 
230 	if (f2fs_has_extra_attr(inode) &&
231 			!f2fs_sb_has_extra_attr(sbi)) {
232 		set_sbi_flag(sbi, SBI_NEED_FSCK);
233 		f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
234 			  __func__, inode->i_ino);
235 		return false;
236 	}
237 
238 	if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
239 			fi->i_extra_isize % sizeof(__le32)) {
240 		set_sbi_flag(sbi, SBI_NEED_FSCK);
241 		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
242 			  __func__, inode->i_ino, fi->i_extra_isize,
243 			  F2FS_TOTAL_EXTRA_ATTR_SIZE);
244 		return false;
245 	}
246 
247 	if (f2fs_has_extra_attr(inode) &&
248 		f2fs_sb_has_flexible_inline_xattr(sbi) &&
249 		f2fs_has_inline_xattr(inode) &&
250 		(!fi->i_inline_xattr_size ||
251 		fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
252 		set_sbi_flag(sbi, SBI_NEED_FSCK);
253 		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
254 			  __func__, inode->i_ino, fi->i_inline_xattr_size,
255 			  MAX_INLINE_XATTR_SIZE);
256 		return false;
257 	}
258 
259 	if (F2FS_I(inode)->extent_tree) {
260 		struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;
261 
262 		if (ei->len &&
263 			(!f2fs_is_valid_blkaddr(sbi, ei->blk,
264 						DATA_GENERIC_ENHANCE) ||
265 			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
266 						DATA_GENERIC_ENHANCE))) {
267 			set_sbi_flag(sbi, SBI_NEED_FSCK);
268 			f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
269 				  __func__, inode->i_ino,
270 				  ei->blk, ei->fofs, ei->len);
271 			return false;
272 		}
273 	}
274 
275 	if (f2fs_has_inline_data(inode) &&
276 			(!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
277 		set_sbi_flag(sbi, SBI_NEED_FSCK);
278 		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
279 			  __func__, inode->i_ino, inode->i_mode);
280 		return false;
281 	}
282 
283 	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
284 		set_sbi_flag(sbi, SBI_NEED_FSCK);
285 		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
286 			  __func__, inode->i_ino, inode->i_mode);
287 		return false;
288 	}
289 
290 	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
291 			fi->i_flags & F2FS_COMPR_FL &&
292 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
293 						i_log_cluster_size)) {
294 		if (ri->i_compress_algorithm >= COMPRESS_MAX) {
295 			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
296 				"compress algorithm: %u, run fsck to fix",
297 				  __func__, inode->i_ino,
298 				  ri->i_compress_algorithm);
299 			return false;
300 		}
301 		if (le64_to_cpu(ri->i_compr_blocks) >
302 				SECTOR_TO_BLOCK(inode->i_blocks)) {
303 			f2fs_warn(sbi, "%s: inode (ino=%lx) has inconsistent "
304 				"i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
305 				  __func__, inode->i_ino,
306 				  le64_to_cpu(ri->i_compr_blocks),
307 				  SECTOR_TO_BLOCK(inode->i_blocks));
308 			return false;
309 		}
310 		if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
311 			ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
312 			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
313 				"log cluster size: %u, run fsck to fix",
314 				  __func__, inode->i_ino,
315 				  ri->i_log_cluster_size);
316 			return false;
317 		}
318 	}
319 
320 	return true;
321 }
322 
323 static int do_read_inode(struct inode *inode)
324 {
325 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
326 	struct f2fs_inode_info *fi = F2FS_I(inode);
327 	struct page *node_page;
328 	struct f2fs_inode *ri;
329 	projid_t i_projid;
330 	int err;
331 
332 	/* Check if ino is within scope */
333 	if (f2fs_check_nid_range(sbi, inode->i_ino))
334 		return -EINVAL;
335 
336 	node_page = f2fs_get_node_page(sbi, inode->i_ino);
337 	if (IS_ERR(node_page))
338 		return PTR_ERR(node_page);
339 
340 	ri = F2FS_INODE(node_page);
341 
342 	inode->i_mode = le16_to_cpu(ri->i_mode);
343 	i_uid_write(inode, le32_to_cpu(ri->i_uid));
344 	i_gid_write(inode, le32_to_cpu(ri->i_gid));
345 	set_nlink(inode, le32_to_cpu(ri->i_links));
346 	inode->i_size = le64_to_cpu(ri->i_size);
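	/*
	 * On-disk i_blocks includes the inode block itself, hence the -1 here
	 * (and the matching +1 in f2fs_update_inode()) before converting
	 * blocks to 512-byte sectors.
	 */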
347 	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);
348 
349 	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
350 	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
351 	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
352 	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
353 	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
354 	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
355 	inode->i_generation = le32_to_cpu(ri->i_generation);
356 	if (S_ISDIR(inode->i_mode))
357 		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
358 	else if (S_ISREG(inode->i_mode))
359 		fi->i_gc_failures[GC_FAILURE_PIN] =
360 					le16_to_cpu(ri->i_gc_failures);
361 	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
362 	fi->i_flags = le32_to_cpu(ri->i_flags);
363 	if (S_ISREG(inode->i_mode))
364 		fi->i_flags &= ~F2FS_PROJINHERIT_FL;
365 	bitmap_zero(fi->flags, FI_MAX);
366 	fi->i_advise = ri->i_advise;
367 	fi->i_pino = le32_to_cpu(ri->i_pino);
368 	fi->i_dir_level = ri->i_dir_level;
369 
370 	if (f2fs_init_extent_tree(inode, &ri->i_ext))
371 		set_page_dirty(node_page);
372 
373 	get_inline_info(inode, ri);
374 
375 	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
376 					le16_to_cpu(ri->i_extra_isize) : 0;
377 
378 	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
379 		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
380 	} else if (f2fs_has_inline_xattr(inode) ||
381 				f2fs_has_inline_dentry(inode)) {
382 		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
383 	} else {
384 
385 		/*
386 		 * Previously, inline data or dentry inodes always reserved 200
387 		 * bytes in the inode layout, even if inline_xattr was disabled.
388 		 * In order to keep the inline_dentry structure for backward
389 		 * compatibility, we get the space back only from inline_data.
390 		 */
391 		fi->i_inline_xattr_size = 0;
392 	}
393 
394 	if (!sanity_check_inode(inode, node_page)) {
395 		f2fs_put_page(node_page, 1);
396 		return -EFSCORRUPTED;
397 	}
398 
399 	/* check whether inline data actually exists */
400 	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
401 		__recover_inline_status(inode, node_page);
402 
403 	/* try to recover cold bit for non-dir inode */
404 	if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
405 		set_cold_node(node_page, false);
406 		set_page_dirty(node_page);
407 	}
408 
409 	/* get rdev by using inline_info */
410 	__get_inode_rdev(inode, ri);
411 
412 	if (S_ISREG(inode->i_mode)) {
413 		err = __written_first_block(sbi, ri);
414 		if (err < 0) {
415 			f2fs_put_page(node_page, 1);
416 			return err;
417 		}
418 		if (!err)
419 			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
420 	}
421 
422 	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
423 		fi->last_disk_size = inode->i_size;
424 
425 	if (fi->i_flags & F2FS_PROJINHERIT_FL)
426 		set_inode_flag(inode, FI_PROJ_INHERIT);
427 
428 	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
429 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
430 		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
431 	else
432 		i_projid = F2FS_DEF_PROJID;
433 	fi->i_projid = make_kprojid(&init_user_ns, i_projid);
434 
435 	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
436 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
437 		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
438 		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
439 	}
440 
441 	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
442 					(fi->i_flags & F2FS_COMPR_FL)) {
443 		if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
444 					i_log_cluster_size)) {
445 			fi->i_compr_blocks = le64_to_cpu(ri->i_compr_blocks);
446 			fi->i_compress_algorithm = ri->i_compress_algorithm;
447 			fi->i_log_cluster_size = ri->i_log_cluster_size;
448 			fi->i_cluster_size = 1 << fi->i_log_cluster_size;
449 			set_inode_flag(inode, FI_COMPRESSED_FILE);
450 		}
451 	}
452 
453 	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
454 	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
455 	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
456 	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
457 	f2fs_put_page(node_page, 1);
458 
459 	stat_inc_inline_xattr(inode);
460 	stat_inc_inline_inode(inode);
461 	stat_inc_inline_dir(inode);
462 	stat_inc_compr_inode(inode);
463 	stat_add_compr_blocks(inode, F2FS_I(inode)->i_compr_blocks);
464 
465 	return 0;
466 }
467 
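/*
 * Look up (or instantiate) an inode by number. The internal node and meta
 * inodes are set up directly (via make_now) without reading a node page;
 * every other inode is filled in from disk by do_read_inode() first.
 */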
468 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
469 {
470 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
471 	struct inode *inode;
472 	int ret = 0;
473 
474 	inode = iget_locked(sb, ino);
475 	if (!inode)
476 		return ERR_PTR(-ENOMEM);
477 
478 	if (!(inode->i_state & I_NEW)) {
479 		trace_f2fs_iget(inode);
480 		return inode;
481 	}
482 	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
483 		goto make_now;
484 
485 	ret = do_read_inode(inode);
486 	if (ret)
487 		goto bad_inode;
488 make_now:
489 	if (ino == F2FS_NODE_INO(sbi)) {
490 		inode->i_mapping->a_ops = &f2fs_node_aops;
491 		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
492 	} else if (ino == F2FS_META_INO(sbi)) {
493 		inode->i_mapping->a_ops = &f2fs_meta_aops;
494 		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
495 	} else if (S_ISREG(inode->i_mode)) {
496 		inode->i_op = &f2fs_file_inode_operations;
497 		inode->i_fop = &f2fs_file_operations;
498 		inode->i_mapping->a_ops = &f2fs_dblock_aops;
499 	} else if (S_ISDIR(inode->i_mode)) {
500 		inode->i_op = &f2fs_dir_inode_operations;
501 		inode->i_fop = &f2fs_dir_operations;
502 		inode->i_mapping->a_ops = &f2fs_dblock_aops;
503 		inode_nohighmem(inode);
504 	} else if (S_ISLNK(inode->i_mode)) {
505 		if (file_is_encrypt(inode))
506 			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
507 		else
508 			inode->i_op = &f2fs_symlink_inode_operations;
509 		inode_nohighmem(inode);
510 		inode->i_mapping->a_ops = &f2fs_dblock_aops;
511 	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
512 			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
513 		inode->i_op = &f2fs_special_inode_operations;
514 		init_special_inode(inode, inode->i_mode, inode->i_rdev);
515 	} else {
516 		ret = -EIO;
517 		goto bad_inode;
518 	}
519 	f2fs_set_inode_flags(inode);
520 	unlock_new_inode(inode);
521 	trace_f2fs_iget(inode);
522 	return inode;
523 
524 bad_inode:
525 	f2fs_inode_synced(inode);
526 	iget_failed(inode);
527 	trace_f2fs_iget_exit(inode, ret);
528 	return ERR_PTR(ret);
529 }
530 
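/*
 * Same as f2fs_iget(), but keeps retrying while the failure is -ENOMEM,
 * waiting briefly between attempts.
 */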
531 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
532 {
533 	struct inode *inode;
534 retry:
535 	inode = f2fs_iget(sb, ino);
536 	if (IS_ERR(inode)) {
537 		if (PTR_ERR(inode) == -ENOMEM) {
538 			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
539 			goto retry;
540 		}
541 	}
542 	return inode;
543 }
544 
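/*
 * Copy the in-memory inode into the raw f2fs_inode in @node_page and mark
 * the page dirty; the caller holds the node page locked (see
 * f2fs_update_inode_page()).
 */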
545 void f2fs_update_inode(struct inode *inode, struct page *node_page)
546 {
547 	struct f2fs_inode *ri;
548 	struct extent_tree *et = F2FS_I(inode)->extent_tree;
549 
550 	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
551 	set_page_dirty(node_page);
552 
553 	f2fs_inode_synced(inode);
554 
555 	ri = F2FS_INODE(node_page);
556 
557 	ri->i_mode = cpu_to_le16(inode->i_mode);
558 	ri->i_advise = F2FS_I(inode)->i_advise;
559 	ri->i_uid = cpu_to_le32(i_uid_read(inode));
560 	ri->i_gid = cpu_to_le32(i_gid_read(inode));
561 	ri->i_links = cpu_to_le32(inode->i_nlink);
562 	ri->i_size = cpu_to_le64(i_size_read(inode));
563 	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);
564 
565 	if (et) {
566 		read_lock(&et->lock);
567 		set_raw_extent(&et->largest, &ri->i_ext);
568 		read_unlock(&et->lock);
569 	} else {
570 		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
571 	}
572 	set_raw_inline(inode, ri);
573 
574 	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
575 	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
576 	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
577 	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
578 	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
579 	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
580 	if (S_ISDIR(inode->i_mode))
581 		ri->i_current_depth =
582 			cpu_to_le32(F2FS_I(inode)->i_current_depth);
583 	else if (S_ISREG(inode->i_mode))
584 		ri->i_gc_failures =
585 			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
586 	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
587 	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
588 	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
589 	ri->i_generation = cpu_to_le32(inode->i_generation);
590 	ri->i_dir_level = F2FS_I(inode)->i_dir_level;
591 
592 	if (f2fs_has_extra_attr(inode)) {
593 		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);
594 
595 		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
596 			ri->i_inline_xattr_size =
597 				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);
598 
599 		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
600 			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
601 								i_projid)) {
602 			projid_t i_projid;
603 
604 			i_projid = from_kprojid(&init_user_ns,
605 						F2FS_I(inode)->i_projid);
606 			ri->i_projid = cpu_to_le32(i_projid);
607 		}
608 
609 		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
610 			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
611 								i_crtime)) {
612 			ri->i_crtime =
613 				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
614 			ri->i_crtime_nsec =
615 				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
616 		}
617 
618 		if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
619 			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
620 							i_log_cluster_size)) {
621 			ri->i_compr_blocks =
622 				cpu_to_le64(F2FS_I(inode)->i_compr_blocks);
623 			ri->i_compress_algorithm =
624 				F2FS_I(inode)->i_compress_algorithm;
625 			ri->i_log_cluster_size =
626 				F2FS_I(inode)->i_log_cluster_size;
627 		}
628 	}
629 
630 	__set_inode_rdev(inode, ri);
631 
632 	/* deleted inode */
633 	if (inode->i_nlink == 0)
634 		clear_inline_node(node_page);
635 
636 	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
637 	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
638 	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
639 	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
640 
641 #ifdef CONFIG_F2FS_CHECK_FS
642 	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
643 #endif
644 }
645 
646 void f2fs_update_inode_page(struct inode *inode)
647 {
648 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
649 	struct page *node_page;
650 retry:
651 	node_page = f2fs_get_node_page(sbi, inode->i_ino);
652 	if (IS_ERR(node_page)) {
653 		int err = PTR_ERR(node_page);
654 		if (err == -ENOMEM) {
655 			cond_resched();
656 			goto retry;
657 		} else if (err != -ENOENT) {
658 			f2fs_stop_checkpoint(sbi, false);
659 		}
660 		return;
661 	}
662 	f2fs_update_inode(inode, node_page);
663 	f2fs_put_page(node_page, 1);
664 }
665 
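/*
 * ->write_inode() callback: nothing to do for the internal node/meta
 * inodes or for inodes that are neither dirty nor carrying unsynced
 * timestamps; otherwise flush the inode into its node page and, when
 * invoked with writeback quota left, balance dirty node pages.
 */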
666 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
667 {
668 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
669 
670 	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
671 			inode->i_ino == F2FS_META_INO(sbi))
672 		return 0;
673 
674 	/*
675 	 * atime could be updated without dirtying f2fs inode in lazytime mode
676 	 */
677 	if (f2fs_is_time_consistent(inode) &&
678 		!is_inode_flag_set(inode, FI_DIRTY_INODE))
679 		return 0;
680 
681 	if (!f2fs_is_checkpoint_ready(sbi))
682 		return -ENOSPC;
683 
684 	/*
685 	 * We need to balance fs here to prevent producing dirty node pages
686 	 * during the urgent cleaning time when running out of free sections.
687 	 */
688 	f2fs_update_inode_page(inode);
689 	if (wbc && wbc->nr_to_write)
690 		f2fs_balance_fs(sbi, true);
691 	return 0;
692 }
693 
694 /*
695  * Called at the last iput() if i_nlink is zero
696  */
697 void f2fs_evict_inode(struct inode *inode)
698 {
699 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
700 	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
701 	int err = 0;
702 
703 	/* any remaining atomic pages should be discarded */
704 	if (f2fs_is_atomic_file(inode))
705 		f2fs_drop_inmem_pages(inode);
706 
707 	trace_f2fs_evict_inode(inode);
708 	truncate_inode_pages_final(&inode->i_data);
709 
710 	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
711 			inode->i_ino == F2FS_META_INO(sbi))
712 		goto out_clear;
713 
714 	f2fs_bug_on(sbi, get_dirty_pages(inode));
715 	f2fs_remove_dirty_inode(inode);
716 
717 	f2fs_destroy_extent_tree(inode);
718 
719 	if (inode->i_nlink || is_bad_inode(inode))
720 		goto no_delete;
721 
722 	err = dquot_initialize(inode);
723 	if (err) {
724 		err = 0;
725 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
726 	}
727 
728 	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
729 	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
730 	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);
731 
732 	sb_start_intwrite(inode->i_sb);
733 	set_inode_flag(inode, FI_NO_ALLOC);
734 	i_size_write(inode, 0);
735 retry:
736 	if (F2FS_HAS_BLOCKS(inode))
737 		err = f2fs_truncate(inode);
738 
739 	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
740 		f2fs_show_injection_info(sbi, FAULT_EVICT_INODE);
741 		err = -EIO;
742 	}
743 
744 	if (!err) {
745 		f2fs_lock_op(sbi);
746 		err = f2fs_remove_inode_page(inode);
747 		f2fs_unlock_op(sbi);
748 		if (err == -ENOENT)
749 			err = 0;
750 	}
751 
752 	/* give more chances in the ENOMEM case */
753 	if (err == -ENOMEM) {
754 		err = 0;
755 		goto retry;
756 	}
757 
758 	if (err) {
759 		f2fs_update_inode_page(inode);
760 		if (dquot_initialize_needed(inode))
761 			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
762 	}
763 	sb_end_intwrite(inode->i_sb);
764 no_delete:
765 	dquot_drop(inode);
766 
767 	stat_dec_inline_xattr(inode);
768 	stat_dec_inline_dir(inode);
769 	stat_dec_inline_inode(inode);
770 	stat_dec_compr_inode(inode);
771 	stat_sub_compr_blocks(inode, F2FS_I(inode)->i_compr_blocks);
772 
773 	if (likely(!f2fs_cp_error(sbi) &&
774 				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
775 		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
776 	else
777 		f2fs_inode_synced(inode);
778 
779 	/* in case f2fs_new_inode() failed, i_ino is zero; skip it */
780 	if (inode->i_ino)
781 		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
782 							inode->i_ino);
783 	if (xnid)
784 		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
785 	if (inode->i_nlink) {
786 		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
787 			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
788 		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
789 			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
790 	}
791 	if (is_inode_flag_set(inode, FI_FREE_NID)) {
792 		f2fs_alloc_nid_failed(sbi, inode->i_ino);
793 		clear_inode_flag(inode, FI_FREE_NID);
794 	} else {
795 		/*
796 		 * If the xattr nid is corrupted, we can reach an error condition:
797 		 * err && !f2fs_exist_written_data(sbi, inode->i_ino, ORPHAN_INO).
798 		 * In that case, f2fs_check_nid_range() is enough to give a clue.
799 		 */
800 	}
801 out_clear:
802 	fscrypt_put_encryption_info(inode);
803 	fsverity_cleanup_inode(inode);
804 	clear_inode(inode);
805 }
806 
807 /* caller should call f2fs_lock_op() */
808 void f2fs_handle_failed_inode(struct inode *inode)
809 {
810 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
811 	struct node_info ni;
812 	int err;
813 
814 	/*
815 	 * clear nlink of the inode in order to release its resources
816 	 * immediately.
817 	 */
818 	clear_nlink(inode);
819 
820 	/*
821 	 * we must call this to avoid the inode remaining dirty, which would
822 	 * result in a panic when flushing dirty inodes in gdirty_list.
823 	 */
824 	f2fs_update_inode_page(inode);
825 	f2fs_inode_synced(inode);
826 
827 	/* don't make bad inode, since it becomes a regular file. */
828 	unlock_new_inode(inode);
829 
830 	/*
831 	 * Note: we should add the inode to the orphan list before f2fs_unlock_op()
832 	 * so we can avoid losing this orphan when encountering a checkpoint
833 	 * followed by a sudden power-off.
834 	 */
835 	err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
836 	if (err) {
837 		set_sbi_flag(sbi, SBI_NEED_FSCK);
838 		f2fs_warn(sbi, "May lose orphan inode, run fsck to fix.");
839 		goto out;
840 	}
841 
842 	if (ni.blk_addr != NULL_ADDR) {
843 		err = f2fs_acquire_orphan_inode(sbi);
844 		if (err) {
845 			set_sbi_flag(sbi, SBI_NEED_FSCK);
846 			f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
847 		} else {
848 			f2fs_add_orphan_inode(inode);
849 		}
850 		f2fs_alloc_nid_done(sbi, inode->i_ino);
851 	} else {
852 		set_inode_flag(inode, FI_FREE_NID);
853 	}
854 
855 out:
856 	f2fs_unlock_op(sbi);
857 
858 	/* iput will drop the inode object */
859 	iput(inode);
860 }
861