// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <asm/unaligned.h>
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

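/*
 * In the diagrams above, "CP" is the most recent checkpoint and every
 * entry to its right is a block in the warm node log written after it.
 * The fsync (F) and dentry (D) marks are flags kept in the node footer;
 * is_fsync_dnode() and is_dent_dnode() test them during the scan below.
 * Recovery walks the node chain twice: find_fsync_dnodes() collects the
 * fsynced inodes, then recover_data() replays their dnodes.
 */
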
static struct kmem_cache *fsync_entry_slab;

bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}

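/*
 * Look up @ino in the list of fsynced inodes collected so far.
 * Returns the matching entry, or NULL if the inode has not been seen.
 */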
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

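/*
 * Grab a reference to the in-memory inode for @ino and append it to the
 * fsync list. @quota_inode is true when the inode block itself was just
 * recovered, so the new inode must also be charged to the quota. The
 * entry allocation is expected not to fail: at this revision,
 * f2fs_kmem_cache_alloc() retries with __GFP_NOFAIL rather than
 * returning NULL.
 */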
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}

static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
{
	if (drop) {
		/* inode should not be recovered, drop it */
		f2fs_inode_synced(entry->inode);
	}
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}

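/*
 * Build an f2fs_filename for the name stored in the raw inode so the
 * dentry can be re-inserted into @dir. For an encrypted and casefolded
 * directory the hash is not computable without the key, so the on-disk
 * copy saved after the name is used instead.
 */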
static int init_recovered_filename(const struct inode *dir,
				   struct f2fs_inode *raw_inode,
				   struct f2fs_filename *fname,
				   struct qstr *usr_fname)
{
	int err;

	memset(fname, 0, sizeof(*fname));
	fname->disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname->disk_name.name = raw_inode->i_name;

	if (WARN_ON(fname->disk_name.len > F2FS_NAME_LEN))
		return -ENAMETOOLONG;

	if (!IS_ENCRYPTED(dir)) {
		usr_fname->name = fname->disk_name.name;
		usr_fname->len = fname->disk_name.len;
		fname->usr_fname = usr_fname;
	}

	/* Compute the hash of the filename */
	if (IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir)) {
		/*
		 * In this case the hash isn't computable without the key, so it
		 * was saved on-disk.
		 */
		if (fname->disk_name.len + sizeof(f2fs_hash_t) > F2FS_NAME_LEN)
			return -EINVAL;
		fname->hash = get_unaligned((f2fs_hash_t *)
				&raw_inode->i_name[fname->disk_name.len]);
	} else if (IS_CASEFOLDED(dir)) {
		err = f2fs_init_casefolded_name(dir, fname);
		if (err)
			return err;
		f2fs_hash_filename(dir, fname);
#ifdef CONFIG_UNICODE
		/* Case-sensitive match is fine for recovery */
		kfree(fname->cf_name.name);
		fname->cf_name.name = NULL;
#endif
	} else {
		f2fs_hash_filename(dir, fname);
	}
	return 0;
}

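/*
 * Re-link @inode under its parent (i_pino from the raw inode). If the
 * name already points at a different inode, recovery acquires an orphan
 * slot for that inode, deletes the stale dentry, and retries the lookup
 * until the slot is free; -ENOMEM from f2fs_add_dentry() is also
 * retried.
 */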
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct f2fs_filename fname;
	struct qstr usr_fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;
	err = init_recovered_filename(dir, raw_inode, &fname, &usr_fname);
	if (err)
		goto out;
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_put;
		}

		err = dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_put;
		}

		err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = f2fs_add_dentry(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_put:
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d",
		    __func__, ino_of_node(ipage), name,
		    IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

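/*
 * If the owner recorded in the node page differs from the in-memory
 * inode, transfer the quota charges to the on-disk uid/gid before those
 * values are copied into the inode by recover_inode().
 */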
static int recover_quota_data(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	struct iattr attr;
	uid_t i_uid = le32_to_cpu(raw->i_uid);
	gid_t i_gid = le32_to_cpu(raw->i_gid);
	int err;

	memset(&attr, 0, sizeof(attr));

	attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
	attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);

	if (!uid_eq(attr.ia_uid, inode->i_uid))
		attr.ia_valid |= ATTR_UID;
	if (!gid_eq(attr.ia_gid, inode->i_gid))
		attr.ia_valid |= ATTR_GID;

	if (!attr.ia_valid)
		return 0;

	err = dquot_transfer(inode, &attr);
	if (err)
		set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
	return err;
}

static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_PIN_FILE)
		set_inode_flag(inode, FI_PIN_FILE);
	else
		clear_inode_flag(inode, FI_PIN_FILE);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_inode_flag(inode, FI_DATA_EXIST);
	else
		clear_inode_flag(inode, FI_DATA_EXIST);
}

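/*
 * Copy the attributes kept in the last written inode block (mode,
 * owner, project id, size, timestamps, flags) into the in-memory
 * inode. This handles scenario 1 above, where post-checkpoint inode(x)
 * updates would otherwise be lost.
 */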
static int recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;
	int err;

	inode->i_mode = le16_to_cpu(raw->i_mode);

	err = recover_quota_data(inode, page);
	if (err)
		return err;

	i_uid_write(inode, le32_to_cpu(raw->i_uid));
	i_gid_write(inode, le32_to_cpu(raw->i_gid));

	if (raw->i_inline & F2FS_EXTRA_ATTR) {
		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
								i_projid)) {
			projid_t i_projid;
			kprojid_t kprojid;

			i_projid = (projid_t)le32_to_cpu(raw->i_projid);
			kprojid = make_kprojid(&init_user_ns, i_projid);

			if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
				err = f2fs_transfer_project_quota(inode,
								kprojid);
				if (err)
					return err;
				F2FS_I(inode)->i_projid = kprojid;
			}
		}
	}

	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;
	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
	f2fs_set_inode_flags(inode);
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
				le16_to_cpu(raw->i_gc_failures);

	recover_inline_flags(inode, raw);

	f2fs_mark_inode_dirty_sync(inode, true);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x",
		    ino_of_node(page), name, raw->i_inline);
	return 0;
}

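/*
 * Pass 1: starting from the next free block of the warm node curseg,
 * follow next_blkaddr_of_node() and collect every inode that has a
 * fsync-marked dnode. The loop_cnt/free_blocks check bounds the walk so
 * a corrupted, looped node chain cannot spin forever.
 */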
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	unsigned int loop_cnt = 0;
	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
						valid_user_blocks(sbi);
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = f2fs_recover_inode_page(sbi, page);
				if (err) {
					f2fs_put_page(page, 1);
					break;
				}
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				f2fs_put_page(page, 1);
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* sanity check in order to detect looped node chain */
		if (++loop_cnt >= free_blocks ||
			blkaddr == next_blkaddr_of_node(page)) {
			f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u",
				    __func__, blkaddr,
				    next_blkaddr_of_node(page));
			f2fs_put_page(page, 1);
			err = -EINVAL;
			break;
		}

		/* advance to the next node block in the chain */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		f2fs_ra_meta_pages_cond(sbi, blkaddr);
	}
	return err;
}

static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry, drop);
}

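/*
 * @blkaddr is about to be reused as the destination of a recovered data
 * block, but the SIT still shows it as valid, i.e. some older dnode
 * still references it. Find that owner through the summary entry
 * (nid + ofs_in_node) and truncate the stale reference so the address
 * can be safely rewritten by f2fs_replace_block().
 */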
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = f2fs_get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return PTR_ERR(sum_page);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = f2fs_start_bidx_of_node(offset, inode) +
				le16_to_cpu(sum.ofs_in_node);

	/*
	 * If the inode page is locked, unlock it temporarily; its
	 * reference count remains held across the lookup below.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (f2fs_data_blkaddr(&tdn) == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

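/*
 * Replay one fsynced node page onto @inode in three steps: inline
 * xattrs, inline data, then the data block addresses. For each index,
 * NULL_ADDR truncates, NEW_ADDR re-reserves, and a valid address is
 * recovered in place via f2fs_replace_block() after any previous owner
 * of the destination block has been truncated.
 */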
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		err = f2fs_recover_inline_xattr(inode, page);
		if (err)
			goto out;
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = f2fs_recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	err = f2fs_recover_inline_data(inode, page);
	if (err) {
		if (err == 1)
			err = 0;
		goto out;
	}

	/* step 3: recover data indices */
	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	err = f2fs_get_node_info(sbi, dn.nid, &ni);
	if (err)
		goto err;

	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));

	if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
		f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
			  inode->i_ino, ofs_of_node(dn.node_page),
			  ofs_of_node(page));
		err = -EFSCORRUPTED;
		goto err;
	}

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = f2fs_data_blkaddr(&dn);
		dest = data_blkaddr(dn.inode, page, dn.ofs_in_node);

		if (__is_valid_data_blkaddr(src) &&
			!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
			err = -EFSCORRUPTED;
			goto err;
		}

		if (__is_valid_data_blkaddr(dest) &&
			!f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
			err = -EFSCORRUPTED;
			goto err;
		}

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			f2fs_reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = f2fs_reserve_new_block(&dn);
				while (err &&
				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
					err = f2fs_reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC,
							DEFAULT_IO_TIMEOUT);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		    inode->i_ino, file_keep_isize(inode) ? "keep" : "recover",
		    recovered, err);
	return err;
}

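/*
 * Pass 2: walk the same node chain again and, for every inode found in
 * pass 1, replay inode updates, the latest dentry block, and data
 * blocks. Entries whose last collected dnode has been processed move to
 * @tmp_inode_list so f2fs_recover_fsync_data() can assert the chain was
 * fully consumed.
 */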
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
		struct list_head *tmp_inode_list, struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		f2fs_ra_meta_pages_cond(sbi, blkaddr);

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page)) {
			err = recover_inode(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			list_move_tail(&entry->list, tmp_inode_list);
next:
		/* advance to the next node block in the chain */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		f2fs_allocate_new_segments(sbi);
	return err;
}

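/*
 * Entry point for roll-forward recovery, called at mount time. With
 * @check_only set it only probes for recoverable data and returns 1 if
 * any was found; otherwise it performs the recovery and writes a
 * CP_RECOVERY checkpoint. A plausible call sequence from the mount path
 * (a sketch with a made-up flag name; see f2fs_fill_super() in
 * fs/f2fs/super.c for the real one):
 *
 *	if (probe_only)
 *		err = f2fs_recover_fsync_data(sbi, true);
 *	else
 *		err = f2fs_recover_fsync_data(sbi, false);
 */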
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list, tmp_inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
	bool fix_curseg_write_pointer = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & SB_RDONLY) {
		f2fs_info(sbi, "recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&tmp_inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	down_write(&sbi->cp_global_sem);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
	else {
		/* restore s_flags to let iput() trash data */
		sbi->sb->s_flags = s_flags;
	}
skip:
	fix_curseg_write_pointer = !check_only || list_empty(&inode_list);

	destroy_fsync_dnodes(&inode_list, err);
	destroy_fsync_dnodes(&tmp_inode_list, err);

	/* truncate the meta pages that were used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	/*
	 * If recovery succeeded, or there was no fsync data to recover, and
	 * the filesystem is not read-only, check and fix the write-pointer
	 * consistency of any zoned block devices.
	 */
	if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
			f2fs_sb_has_blkzoned(sbi)) {
		err = f2fs_fix_curseg_write_pointer(sbi);
		ret = err;
	}

	if (!err)
		clear_sbi_flag(sbi, SBI_POR_DOING);

	up_write(&sbi->cp_global_sem);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list, err);

	if (need_writecp) {
		set_sbi_flag(sbi, SBI_IS_RECOVERED);

		if (!err) {
			struct cp_control cpc = {
				.reason = CP_RECOVERY,
			};
			err = f2fs_write_checkpoint(sbi, &cpc);
		}
	}

	kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return ret ? ret : err;
}