// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"

#include <trace/events/f2fs.h>

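/*
 * Mark an inode dirty for writeback unless it is brand new or has already
 * been accounted as dirty by f2fs_inode_dirtied().
 */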
void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;

	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}

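/*
 * Propagate the on-disk F2FS_*_FL flags (and the encryption state) into the
 * generic VFS inode flags.
 */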
void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & F2FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (file_is_encrypt(inode))
		new_fl |= S_ENCRYPTED;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
			S_ENCRYPTED);
}

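/*
 * Read the device number of a special inode from the raw inode: the old
 * encoding lives in the first address slot after the extra attribute area,
 * the new encoding in the slot following it.
 */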
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[extra_size])
			inode->i_rdev = old_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size]));
		else
			inode->i_rdev = new_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size + 1]));
	}
}

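/*
 * Check whether the first data block of the inode has been written:
 * returns 0 if it holds a valid block address, 1 if it does not point to
 * a written block, and -EFAULT if the address is out of range.
 */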
static int __written_first_block(struct f2fs_sb_info *sbi,
					struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

	if (!__is_valid_data_blkaddr(addr))
		return 1;
	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE))
		return -EFAULT;
	return 0;
}

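/*
 * Store the device number of a character/block special inode into the raw
 * inode, using the old encoding when it fits and the new one otherwise.
 */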
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}

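/*
 * If any inline data survived in the inode page, re-set FI_DATA_EXIST and
 * the raw inline flags so the in-memory state matches what is on disk.
 */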
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
	return;
}

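/*
 * An inode checksum is only maintained when the inode_chksum feature is
 * enabled and the on-disk inode has an extra attribute area large enough
 * to hold i_inode_checksum.
 */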
static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_sb_has_inode_chksum(sbi))
		return false;

	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				i_inode_checksum))
		return false;

	return true;
}

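/*
 * Compute the inode checksum: seed the CRC with the inode number and
 * generation, then checksum the whole on-disk inode with the
 * i_inode_checksum field itself treated as zero.
 */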
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}

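/*
 * Verify the stored inode checksum against a freshly computed one.  The
 * check is skipped (treated as passing) when the filesystem is shut down,
 * when checksums are not enabled for this inode, or, without
 * CONFIG_F2FS_CHECK_FS, while the page is dirty or under writeback.
 */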
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
		return true;

#ifdef CONFIG_F2FS_CHECK_FS
	if (!f2fs_enable_inode_chksum(sbi, page))
#else
	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
#endif
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_msg(sbi->sb, KERN_WARNING,
			"checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
			page->index, ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

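/*
 * Validate the on-disk inode fields before trusting them: i_blocks, the
 * footer ino/nid pair, extra attribute and inline xattr sizes, the cached
 * largest extent, and the inline data/dentry flags against the file mode.
 * On any inconsistency, flag the filesystem for fsck and reject the inode.
 */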
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, "
			"run fsck to fix.",
			__func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: corrupted inode footer i_ino=%lx, ino,nid: "
			"[%u, %u] run fsck to fix.",
			__func__, inode->i_ino,
			ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (f2fs_sb_has_flexible_inline_xattr(sbi)
			&& !f2fs_has_extra_attr(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: corrupted inode ino=%lx, run fsck to fix.",
			__func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
			!f2fs_sb_has_extra_attr(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: inode (ino=%lx) is with extra_attr, "
			"but extra_attr feature is off",
			__func__, inode->i_ino);
		return false;
	}

	if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: inode (ino=%lx) has corrupted i_extra_isize: %d, "
			"max: %zu",
			__func__, inode->i_ino, fi->i_extra_isize,
			F2FS_TOTAL_EXTRA_ATTR_SIZE);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
		f2fs_sb_has_flexible_inline_xattr(sbi) &&
		f2fs_has_inline_xattr(inode) &&
		(!fi->i_inline_xattr_size ||
		fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: inode (ino=%lx) has corrupted "
			"i_inline_xattr_size: %d, max: %zu",
			__func__, inode->i_ino, fi->i_inline_xattr_size,
			MAX_INLINE_XATTR_SIZE);
		return false;
	}

	if (F2FS_I(inode)->extent_tree) {
		struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;

		if (ei->len &&
			(!f2fs_is_valid_blkaddr(sbi, ei->blk,
						DATA_GENERIC_ENHANCE) ||
			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
						DATA_GENERIC_ENHANCE))) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: inode (ino=%lx) extent info [%u, %u, %u] "
				"is incorrect, run fsck to fix",
				__func__, inode->i_ino,
				ei->blk, ei->fofs, ei->len);
			return false;
		}
	}

	if (f2fs_has_inline_data(inode) &&
			(!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: inode (ino=%lx, mode=%u) should not have "
			"inline_data, run fsck to fix",
			__func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: inode (ino=%lx, mode=%u) should not have "
			"inline_dentry, run fsck to fix",
			__func__, inode->i_ino, inode->i_mode);
		return false;
	}

	return true;
}

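/*
 * Fill the VFS inode and f2fs_inode_info from the on-disk inode block:
 * read the node page, copy the basic attributes, recover inline and cold
 * state, and run sanity checks before the inode is exposed to the VFS.
 */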
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;
	int err;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	fi->flags = 0;
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	if (f2fs_init_extent_tree(inode, &ri->i_ext))
		set_page_dirty(node_page);

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {

		/*
		 * Previous inline data or directory always reserved 200 bytes
		 * in inode layout, even if inline_xattr is disabled. In order
		 * to keep inline_dentry's structure for backward compatibility,
		 * we get the space back only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		return -EINVAL;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* try to recover cold bit for non-dir inode */
	if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
		set_cold_node(node_page, false);
		set_page_dirty(node_page);
	}

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	if (S_ISREG(inode->i_mode)) {
		err = __written_first_block(sbi, ri);
		if (err < 0) {
			f2fs_put_page(node_page, 1);
			return err;
		}
		if (!err)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);

	return 0;
}

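/*
 * Look up or read in an inode by number.  Fresh inodes are populated from
 * disk via do_read_inode() (except for the node/meta inodes) and get their
 * operations assigned according to the file type.
 */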
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		inode_nohighmem(inode);
	} else if (S_ISLNK(inode->i_mode)) {
		if (file_is_encrypt(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);
	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	f2fs_inode_synced(inode);
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

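/*
 * Same as f2fs_iget(), but retry after a short congestion wait when the
 * lookup fails with -ENOMEM.
 */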
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
	return inode;
}

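/*
 * Write back the in-memory inode state into its node page: basic
 * attributes, the largest cached extent, inline flags, extra attributes
 * and rdev, then (with CONFIG_F2FS_CHECK_FS) refresh the inode checksum.
 */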
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (et) {
		read_lock(&et->lock);
		set_raw_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}
	}

	__set_inode_rdev(inode, ri);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_inline_node(node_page);

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}

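/*
 * Fetch the inode's node page and flush the in-memory inode into it,
 * retrying on -ENOMEM and stopping checkpointing on other fatal errors.
 */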
void f2fs_update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
retry:
	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);
		if (err == -ENOMEM) {
			cond_resched();
			goto retry;
		} else if (err != -ENOENT) {
			f2fs_stop_checkpoint(sbi, false);
		}
		return;
	}
	f2fs_update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}

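/*
 * ->write_inode() callback: skip the node/meta inodes and clean inodes,
 * refuse to proceed when a checkpoint is not currently possible, and
 * otherwise write the inode page and rebalance the filesystem if needed.
 */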
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	if (f2fs_is_checkpoint_ready(sbi))
		return -ENOSPC;

	/*
	 * We need to balance fs here to prevent producing dirty node pages
	 * during the urgent cleaning time when running out of free sections.
	 */
	f2fs_update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	err = dquot_initialize(inode);
	if (err) {
		err = 0;
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
		f2fs_show_injection_info(FAULT_EVICT_INODE);
		err = -EIO;
	}

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT)
			err = 0;
	}

	/* give more chances in the ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err) {
		f2fs_update_inode_page(inode);
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}
	sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	if (likely(!is_set_ckpt_flags(sbi, CP_ERROR_FLAG) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* ino == 0, if f2fs_new_inode() failed */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach this error
		 * condition: err && !f2fs_exist_written_data(sbi,
		 * inode->i_ino, ORPHAN_INO). In that case,
		 * f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/*
	 * Clear nlink of the inode in order to release its resources
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * We must call this to avoid the inode remaining dirty, which would
	 * result in a panic when flushing dirty inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make a bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add the inode to the orphan list before
	 * f2fs_unlock_op(), so that we do not lose this orphan if a
	 * checkpoint runs and is followed by a sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"May lose orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}
811