// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;

/*
 * Check whether the given nid is within node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: out-of-range nid=%x, run fsck to fix.",
				__func__, nid);
		return -EINVAL;
	}
	return 0;
}

bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 50% of memory to each component, respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
							PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i < MAX_INO_ENTRY; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (atomic_read(&sbi->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INMEM_PAGES) {
		/* allow at most 20% of total RAM for in-memory pages */
		mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
		res = mem_size < (val.totalram / 5);
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}
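
/*
 * Illustrative example (not part of the original source): with 1 GiB
 * of low memory (262144 4 KiB pages) and ram_thresh set to 10, the
 * FREE_NIDS budget above is (262144 * 10 / 100) >> 2 = 6553 pages of
 * free_nid objects before f2fs_available_free_memory() starts
 * returning false for this type.
 */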

static void clear_node_page_dirty(struct page *page)
{
	if (PageDirty(page)) {
		f2fs_clear_page_cache_dirty_tag(page);
		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_page_nofail(sbi, current_nat_addr(sbi, nid));
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

	/* get current nat block page with lock */
	src_page = get_current_nat_page(sbi, nid);
	if (IS_ERR(src_page))
		return src_page;
	dst_page = f2fs_grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	if (no_fail)
		new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	else
		new = kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}

/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);

	spin_lock(&nm_i->nat_list_lock);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	nm_i->nat_cnt++;
	return ne;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	struct nat_entry *ne;

	ne = radix_tree_lookup(&nm_i->nat_root, n);

	/* for a recently accessed nat entry, move it to the tail of the LRU list */
	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
		spin_lock(&nm_i->nat_list_lock);
		if (!list_empty(&ne->list))
			list_move_tail(&ne->list, &nm_i->nat_entries);
		spin_unlock(&nm_i->nat_list_lock);
	}

	return ne;
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	__free_nat_entry(e);
}

static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
							struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	return head;
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	if (!new_ne)
		head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt under the conditions below:
	 * 1. updating NEW_ADDR to a valid block address;
	 * 2. updating an old block address to a new one.
	 */
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->dirty_nat_cnt++;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	spin_lock(&nm_i->nat_list_lock);
	if (new_ne)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
	spin_unlock(&nm_i->nat_list_lock);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	spin_lock(&nm_i->nat_list_lock);
	list_move_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->dirty_nat_cnt--;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
{
	return NODE_MAPPING(sbi) == page->mapping &&
			IS_DNODE(page) && is_cold_node(page);
}

void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->fsync_node_lock);
	INIT_LIST_HEAD(&sbi->fsync_node_list);
	sbi->fsync_seg_id = 0;
	sbi->fsync_node_num = 0;
}

static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
							struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;
	unsigned int seq_id;

	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS);

	get_page(page);
	fn->page = page;
	INIT_LIST_HEAD(&fn->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&fn->list, &sbi->fsync_node_list);
	fn->seq_id = sbi->fsync_seg_id++;
	seq_id = fn->seq_id;
	sbi->fsync_node_num++;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;
}
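
/*
 * Illustrative note (not in the original file): seq_ids handed out
 * above increase monotonically under fsync_node_lock, so a writer
 * that records the id of its last fsync dnode can later call
 * f2fs_wait_on_node_pages_writeback(sbi, seq_id) to drain exactly
 * the entries queued up to that point and no more.
 */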

void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
		if (fn->page == page) {
			list_del(&fn->list);
			sbi->fsync_node_num--;
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			kmem_cache_free(fsync_node_entry_slab, fn);
			put_page(page);
			return;
		}
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	f2fs_bug_on(sbi, 1);
}

void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	sbi->fsync_seg_id = 0;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}

int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	up_read(&nm_i->nat_tree_lock);
	return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}

/* must be locked by nat_tree_lock */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	new = __alloc_nat_entry(nid, false);
	if (!new)
		return;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *new = __alloc_nat_entry(ni->nid, true);

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = __init_nat_entry(nm_i, new, NULL, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry can
		 * remain in the nat cache, so reinitialize it with new
		 * information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}
	/* let's free early to reduce memory consumption */
	if (e != new)
		__free_nat_entry(new);

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, is_valid_data_blkaddr(sbi, nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);

		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!is_valid_data_blkaddr(sbi, new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}
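
/*
 * Example life cycle (illustrative): a node created by
 * f2fs_new_node_page() enters the cache via
 * set_node_addr(sbi, &ni, NEW_ADDR, false); writeback in
 * __write_node_page() then moves it to a real block address; and
 * truncate_node() finally passes NULL_ADDR, which is the only
 * transition that bumps the version number above.
 */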

int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	spin_lock(&nm_i->nat_list_lock);
	while (nr_shrink) {
		struct nat_entry *ne;

		if (list_empty(&nm_i->nat_entries))
			break;

		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		list_del(&ne->list);
		spin_unlock(&nm_i->nat_list_lock);

		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;

		spin_lock(&nm_i->nat_list_lock);
	}
	spin_unlock(&nm_i->nat_list_lock);

	up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}

/*
 * This function returns 0 on success, or a negative errno on failure.
 */
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
						struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	int i;

	ni->nid = nid;

	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		up_read(&nm_i->nat_tree_lock);
		return 0;
	}

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));

	/* Check current segment summary */
	down_read(&curseg->journal_rwsem);
	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from nat page */
	index = current_nat_addr(sbi, nid);
	up_read(&nm_i->nat_tree_lock);

	page = f2fs_get_meta_page(sbi, index);
	if (IS_ERR(page))
		return PTR_ERR(page);

	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(sbi, nid, &ne);
	return 0;
}
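
/*
 * Lookup order above, as a sketch (illustrative): in-memory nat cache,
 * then the NAT journal kept in the CURSEG_HOT_DATA summary, then the
 * on-disk NAT block. Only the last two paths reach cache_nat_entry(),
 * so a cache hit never re-inserts the entry.
 */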

/*
 * Readahead up to n node pages (clamped to NIDS_PER_BLOCK).
 */
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		f2fs_ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK;
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		/* fall through */
	case 2:
		base += 2 * direct_blks;
		/* fall through */
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}
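
/*
 * Worked example (illustrative, assuming ADDRS_PER_BLOCK and
 * NIDS_PER_BLOCK are both 1018): if the tree walk failed at a missing
 * indirect node itself (cur_level == 1, max_level == 2), skipped_unit
 * becomes 1018 * 1018, so the caller's next probe jumps past the
 * whole absent subtree instead of testing it block by block.
 */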

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}
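
/*
 * Worked example (illustrative, assuming ADDRS_PER_INODE() == 923 and
 * ADDRS_PER_BLOCK == 1018): for block 1000, the walk above yields
 * offset[0] = NODE_DIR1_BLOCK, offset[1] = 1000 - 923 = 77,
 * noffset[1] = 1 and level = 1, i.e. the 78th slot of the first
 * direct node block.
 */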

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set to ALLOC_NODE.
 * In the LOOKUP_NODE cases, we don't need to care about the lock.
 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = f2fs_get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->inode,
				dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}
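
/*
 * Typical call pattern (illustrative sketch, error handling elided):
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = dn.data_blkaddr;
 *		f2fs_put_dnode(&dn);
 *	}
 *
 * ALLOC_NODE callers additionally hold f2fs_lock_op() across the call.
 */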

static int truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	int err;
	pgoff_t index;

	err = f2fs_get_node_info(sbi, dn->nid, &ni);
	if (err)
		return err;

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	index = dn->node_page->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

	return 0;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;
	int err;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* set up dnode_of_data as the parameter for truncation */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	f2fs_truncate_data_blocks(dn);
	err = truncate_node(dn);
	if (err)
		return err;

	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* the reference count will be increased */
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		err = truncate_node(dn);
		if (err)
			goto fail;
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
	if (level < 0)
		return level;

	page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

/* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;
	int err;

	if (!nid)
		return 0;

	npage = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	set_new_dnode(&dn, inode, NULL, npage, nid);
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_page(npage, 1);
		return err;
	}

	f2fs_i_xnid_write(inode, 0);

	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		f2fs_put_dnode(&dn);
		return -EIO;
	}
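	/*
	 * Note (illustrative): 8 here is one 4 KiB inode block expressed
	 * in the 512-byte units of inode->i_blocks.
	 */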
	f2fs_bug_on(F2FS_I_SB(inode),
			inode->i_blocks != 0 && inode->i_blocks != 8);

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}
	return 0;
}

struct page *f2fs_new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return f2fs_new_node_page(&dn, 0);
}

struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
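
/*
 * Note (illustrative): ofs encodes the node's position in the file's
 * node tree; ofs == 0 is the inode page itself (see
 * f2fs_new_inode_page() above), and f2fs_has_xattr_block(ofs) spots
 * the dedicated xattr node offset.
 */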

/*
 * The caller should act on the return value as follows:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, int op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};
	int err;

	if (PageUptodate(page)) {
#ifdef CONFIG_F2FS_CHECK_FS
		f2fs_bug_on(sbi, !f2fs_inode_chksum_verify(sbi, page));
#endif
		return LOCKED_PAGE;
	}

	err = f2fs_get_node_info(sbi, page->index, &ni);
	if (err)
		return err;

	if (unlikely(ni.blk_addr == NULL_ADDR) ||
			is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
	return f2fs_submit_page_bio(&fio);
}
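
/*
 * Caller-side sketch of the contract above (illustrative):
 *
 *	err = read_node_page(page, 0);
 *	if (err < 0)
 *		f2fs_put_page(page, 1);	/. error: page is still locked ./
 *	else if (err == LOCKED_PAGE)
 *		...;			/. already uptodate, lock retained ./
 *	else
 *		lock_page(page);	/. bio submitted; the read end-io
 *					   unlocks, so re-take the lock ./
 *
 * which is what __get_node_page() below does.
 */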

/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	if (f2fs_check_nid_range(sbi, nid))
		return;

	apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}

static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, 0);
	if (err < 0) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	} else if (err == LOCKED_PAGE) {
		err = 0;
		goto page_hit;
	}

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		err = -EIO;
		goto out_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, page)) {
		err = -EBADMSG;
		goto out_err;
	}
page_hit:
	if (unlikely(nid != nid_of_node(page))) {
		f2fs_msg(sbi->sb, KERN_WARNING, "inconsistent node block, "
			"nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
			nid, nid_of_node(page), ino_of_node(page),
			ofs_of_node(page), cpver_of_node(page),
			next_blkaddr_of_node(page));
		err = -EINVAL;
out_err:
		ClearPageUptodate(page);
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	return page;
}

struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *f2fs_get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}

static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page);
	inode_dec_dirty_pages(inode);
	f2fs_remove_dirty_inode(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}

static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index;
	struct pagevec pvec;
	struct page *last_page = NULL;
	int nr_pages;

	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return last_page;
}

static int __write_node_page(struct page *page, bool atomic, bool *submitted,
				struct writeback_control *wbc, bool do_balance,
				enum iostat_type io_type, unsigned int *seq_id)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = ino_of_node(page),
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
	};
	unsigned int seq;

	trace_f2fs_writepage(page, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (wbc->sync_mode == WB_SYNC_NONE &&
			IS_DNODE(page) && is_cold_node(page))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (f2fs_get_node_info(sbi, nid, &ni))
		goto redirty_out;

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	if (__is_valid_data_blkaddr(ni.blk_addr) &&
		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC)) {
		up_read(&sbi->node_write);
		goto redirty_out;
	}

	if (atomic && !test_opt(sbi, NOBARRIER))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	set_page_writeback(page);
	ClearPageError(page);

	if (f2fs_in_warm_node_list(sbi, page)) {
		seq = f2fs_add_fsync_node_entry(sbi, page);
		if (seq_id)
			*seq_id = seq;
	}

	fio.old_blkaddr = ni.blk_addr;
	f2fs_do_write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
		submitted = NULL;
	}

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	if (do_balance)
		f2fs_balance_fs(sbi, false);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

int f2fs_move_node_page(struct page *node_page, int gc_type)
{
	int err = 0;

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		f2fs_wait_on_page_writeback(node_page, NODE, true, true);

		set_page_dirty(node_page);

		if (!clear_page_dirty_for_io(node_page)) {
			err = -EAGAIN;
			goto out_page;
		}

		if (__write_node_page(node_page, false, NULL,
					&wbc, false, FS_GC_NODE_IO, NULL)) {
			err = -EAGAIN;
			unlock_page(node_page);
		}
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
	return err;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_node_page(page, false, NULL, wbc, false,
						FS_NODE_IO, NULL);
}

int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id)
{
	pgoff_t index;
	struct pagevec pvec;
	int ret = 0;
	struct page *last_page = NULL;
	bool marked = false;
	nid_t ino = inode->i_ino;
	int nr_pages;
	int nwritten = 0;

	if (atomic) {
		last_page = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
	}
retry:
	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				ret = -EIO;
				goto out;
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page) && page != last_page) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, NODE, true, true);

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			if (!atomic || page == last_page) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						f2fs_update_inode(inode, page);
					set_dentry_mark(page,
						f2fs_need_dentry_mark(sbi, ino));
				}
				/* may be written by another thread */
				if (!PageDirty(page))
					set_page_dirty(page);
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_node_page(page, atomic &&
						page == last_page,
						&submitted, wbc, true,
						FS_NODE_IO, seq_id);
			if (ret) {
				unlock_page(page);
				f2fs_put_page(last_page, 0);
				break;
			} else if (submitted) {
				nwritten++;
			}

			if (page == last_page) {
				f2fs_put_page(page, 0);
				marked = true;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();

		if (ret || marked)
			break;
	}
	if (!ret && atomic && !marked) {
		f2fs_msg(sbi->sb, KERN_DEBUG,
			"Retry to write fsync mark: ino=%u, idx=%lx",
					ino, last_page->index);
		lock_page(last_page);
		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
		set_page_dirty(last_page);
		unlock_page(last_page);
		goto retry;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
	return ret ? -EIO : 0;
}

int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
				struct writeback_control *wbc,
				bool do_balance, enum iostat_type io_type)
{
	pgoff_t index;
	struct pagevec pvec;
	int step = 0;
	int nwritten = 0;
	int ret = 0;
	int nr_pages, done = 0;

	pagevec_init(&pvec);

next_step:
	index = 0;

	while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			/* give priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;
lock_node:
			if (wbc->sync_mode == WB_SYNC_ALL)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data */
			if (is_inline_node(page)) {
				clear_inline_node(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				goto lock_node;
			}

			f2fs_wait_on_page_writeback(page, NODE, true, true);

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			ret = __write_node_page(page, false, &submitted,
						wbc, do_balance, io_type, NULL);
			if (ret)
				unlock_page(page);
			else if (submitted)
				nwritten++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		if (wbc->sync_mode == WB_SYNC_NONE && step == 1)
			goto out;
		step++;
		goto next_step;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write(sbi, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;
	return ret;
}
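
/*
 * Illustrative note: one call above walks the dirty NODE mapping up to
 * three times, once per step. WB_SYNC_NONE callers bail out after
 * step 1 (see the check below the loop), so cold file dnodes are left
 * for a later WB_SYNC_ALL pass.
 */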

int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
						unsigned int seq_id)
{
	struct fsync_node_entry *fn;
	struct page *page;
	struct list_head *head = &sbi->fsync_node_list;
	unsigned long flags;
	unsigned int cur_seq_id = 0;
	int ret2, ret = 0;

	while (seq_id && cur_seq_id < seq_id) {
		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		fn = list_first_entry(head, struct fsync_node_entry, list);
		if (fn->seq_id > seq_id) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		cur_seq_id = fn->seq_id;
		page = fn->page;
		get_page(page);
		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

		f2fs_wait_on_page_writeback(page, NODE, true, false);
		if (TestClearPageError(page))
			ret = -EIO;

		put_page(page);

		if (ret)
			break;
	}

	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
	if (!ret)
		ret = ret2;

	return ret;
}

static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct blk_plug plug;
	long diff;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* balance f2fs's metadata in the background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write them together */
	if (wbc->sync_mode != WB_SYNC_ALL &&
			get_pages(sbi, F2FS_DIRTY_NODES) <
					nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[NODE]);
	else if (atomic_read(&sbi->wb_sync_req[NODE]))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	diff = nr_pages_to_write(sbi, NODE, wbc);
	blk_start_plug(&plug);
	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
	blk_finish_plug(&plug);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	trace_f2fs_writepages(mapping->host, wbc, NODE);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	if (!PageUptodate(page))
		SetPageUptodate(page);
#ifdef CONFIG_F2FS_CHECK_FS
	if (IS_INODE(page))
		f2fs_inode_chksum_set(F2FS_P_SB(page), page);
#endif
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		f2fs_set_page_private(page, 0);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
#ifdef CONFIG_MIGRATION
	.migratepage    = f2fs_migrate_page,
#endif
};
1986 
1987 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
1988 						nid_t n)
1989 {
1990 	return radix_tree_lookup(&nm_i->free_nid_root, n);
1991 }
1992 
1993 static int __insert_free_nid(struct f2fs_sb_info *sbi,
1994 			struct free_nid *i, enum nid_state state)
1995 {
1996 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1997 
1998 	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
1999 	if (err)
2000 		return err;
2001 
2002 	f2fs_bug_on(sbi, state != i->state);
2003 	nm_i->nid_cnt[state]++;
2004 	if (state == FREE_NID)
2005 		list_add_tail(&i->list, &nm_i->free_nid_list);
2006 	return 0;
2007 }
2008 
2009 static void __remove_free_nid(struct f2fs_sb_info *sbi,
2010 			struct free_nid *i, enum nid_state state)
2011 {
2012 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2013 
2014 	f2fs_bug_on(sbi, state != i->state);
2015 	nm_i->nid_cnt[state]--;
2016 	if (state == FREE_NID)
2017 		list_del(&i->list);
2018 	radix_tree_delete(&nm_i->free_nid_root, i->nid);
2019 }
2020 
2021 static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
2022 			enum nid_state org_state, enum nid_state dst_state)
2023 {
2024 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2025 
2026 	f2fs_bug_on(sbi, org_state != i->state);
2027 	i->state = dst_state;
2028 	nm_i->nid_cnt[org_state]--;
2029 	nm_i->nid_cnt[dst_state]++;
2030 
2031 	switch (dst_state) {
2032 	case PREALLOC_NID:
2033 		list_del(&i->list);
2034 		break;
2035 	case FREE_NID:
2036 		list_add_tail(&i->list, &nm_i->free_nid_list);
2037 		break;
2038 	default:
2039 		BUG_ON(1);
2040 	}
2041 }
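
/*
 * Lifecycle sketch (from the helpers above): a cached nid starts in
 * FREE_NID state, moves to PREALLOC_NID when handed out by
 * f2fs_alloc_nid(), and then either leaves the cache for good via
 * f2fs_alloc_nid_done() or returns to FREE_NID via
 * f2fs_alloc_nid_failed(). Only FREE_NID entries sit on free_nid_list.
 */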
2042 
2043 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
2044 							bool set, bool build)
2045 {
2046 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2047 	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
2048 	unsigned int nid_ofs = nid - START_NID(nid);
2049 
2050 	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
2051 		return;
2052 
2053 	if (set) {
2054 		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2055 			return;
2056 		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2057 		nm_i->free_nid_count[nat_ofs]++;
2058 	} else {
2059 		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2060 			return;
2061 		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2062 		if (!build)
2063 			nm_i->free_nid_count[nat_ofs]--;
2064 	}
2065 }
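
/*
 * Worked example (assuming the usual 4KB block, which holds
 * NAT_ENTRY_PER_BLOCK == 455 entries): for nid 1000,
 * NAT_BLOCK_OFFSET(1000) = 1000 / 455 = 2 and
 * nid_ofs = 1000 - START_NID(1000) = 1000 - 910 = 90,
 * so the nid is tracked by bit 90 of free_nid_bitmap[2].
 */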
2066 
2067 /* return whether the nid is recognized as free */
2068 static bool add_free_nid(struct f2fs_sb_info *sbi,
2069 				nid_t nid, bool build, bool update)
2070 {
2071 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2072 	struct free_nid *i, *e;
2073 	struct nat_entry *ne;
2074 	int err = -EINVAL;
2075 	bool ret = false;
2076 
2077 	/* 0 nid should not be used */
2078 	if (unlikely(nid == 0))
2079 		return false;
2080 
2081 	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
2082 	i->nid = nid;
2083 	i->state = FREE_NID;
2084 
2085 	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
2086 
2087 	spin_lock(&nm_i->nid_list_lock);
2088 
2089 	if (build) {
2090 		/*
2091 		 *   Thread A             Thread B
2092 		 *  - f2fs_create
2093 		 *   - f2fs_new_inode
2094 		 *    - f2fs_alloc_nid
2095 		 *     - __insert_nid_to_list(PREALLOC_NID)
2096 		 *                     - f2fs_balance_fs_bg
2097 		 *                      - f2fs_build_free_nids
2098 		 *                       - __f2fs_build_free_nids
2099 		 *                        - scan_nat_page
2100 		 *                         - add_free_nid
2101 		 *                          - __lookup_nat_cache
2102 		 *  - f2fs_add_link
2103 		 *   - f2fs_init_inode_metadata
2104 		 *    - f2fs_new_inode_page
2105 		 *     - f2fs_new_node_page
2106 		 *      - set_node_addr
2107 		 *  - f2fs_alloc_nid_done
2108 		 *   - __remove_nid_from_list(PREALLOC_NID)
2109 		 *                         - __insert_nid_to_list(FREE_NID)
2110 		 */
2111 		ne = __lookup_nat_cache(nm_i, nid);
2112 		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
2113 				nat_get_blkaddr(ne) != NULL_ADDR))
2114 			goto err_out;
2115 
2116 		e = __lookup_free_nid_list(nm_i, nid);
2117 		if (e) {
2118 			if (e->state == FREE_NID)
2119 				ret = true;
2120 			goto err_out;
2121 		}
2122 	}
2123 	ret = true;
2124 	err = __insert_free_nid(sbi, i, FREE_NID);
2125 err_out:
2126 	if (update) {
2127 		update_free_nid_bitmap(sbi, nid, ret, build);
2128 		if (!build)
2129 			nm_i->available_nids++;
2130 	}
2131 	spin_unlock(&nm_i->nid_list_lock);
2132 	radix_tree_preload_end();
2133 
2134 	if (err)
2135 		kmem_cache_free(free_nid_slab, i);
2136 	return ret;
2137 }
2138 
2139 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
2140 {
2141 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2142 	struct free_nid *i;
2143 	bool need_free = false;
2144 
2145 	spin_lock(&nm_i->nid_list_lock);
2146 	i = __lookup_free_nid_list(nm_i, nid);
2147 	if (i && i->state == FREE_NID) {
2148 		__remove_free_nid(sbi, i, FREE_NID);
2149 		need_free = true;
2150 	}
2151 	spin_unlock(&nm_i->nid_list_lock);
2152 
2153 	if (need_free)
2154 		kmem_cache_free(free_nid_slab, i);
2155 }
2156 
2157 static int scan_nat_page(struct f2fs_sb_info *sbi,
2158 			struct page *nat_page, nid_t start_nid)
2159 {
2160 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2161 	struct f2fs_nat_block *nat_blk = page_address(nat_page);
2162 	block_t blk_addr;
2163 	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
2164 	int i;
2165 
2166 	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
2167 
2168 	i = start_nid % NAT_ENTRY_PER_BLOCK;
2169 
2170 	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
2171 		if (unlikely(start_nid >= nm_i->max_nid))
2172 			break;
2173 
2174 		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
2175 
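		/* NEW_ADDR must never appear in the on-disk NAT: corruption */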
2176 		if (blk_addr == NEW_ADDR)
2177 			return -EINVAL;
2178 
2179 		if (blk_addr == NULL_ADDR) {
2180 			add_free_nid(sbi, start_nid, true, true);
2181 		} else {
2182 			spin_lock(&NM_I(sbi)->nid_list_lock);
2183 			update_free_nid_bitmap(sbi, start_nid, false, true);
2184 			spin_unlock(&NM_I(sbi)->nid_list_lock);
2185 		}
2186 	}
2187 
2188 	return 0;
2189 }
2190 
2191 static void scan_curseg_cache(struct f2fs_sb_info *sbi)
2192 {
2193 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2194 	struct f2fs_journal *journal = curseg->journal;
2195 	int i;
2196 
2197 	down_read(&curseg->journal_rwsem);
2198 	for (i = 0; i < nats_in_cursum(journal); i++) {
2199 		block_t addr;
2200 		nid_t nid;
2201 
2202 		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
2203 		nid = le32_to_cpu(nid_in_journal(journal, i));
2204 		if (addr == NULL_ADDR)
2205 			add_free_nid(sbi, nid, true, false);
2206 		else
2207 			remove_free_nid(sbi, nid);
2208 	}
2209 	up_read(&curseg->journal_rwsem);
2210 }
2211 
2212 static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
2213 {
2214 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2215 	unsigned int i, idx;
2216 	nid_t nid;
2217 
2218 	down_read(&nm_i->nat_tree_lock);
2219 
2220 	for (i = 0; i < nm_i->nat_blocks; i++) {
2221 		if (!test_bit_le(i, nm_i->nat_block_bitmap))
2222 			continue;
2223 		if (!nm_i->free_nid_count[i])
2224 			continue;
2225 		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
2226 			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
2227 						NAT_ENTRY_PER_BLOCK, idx);
2228 			if (idx >= NAT_ENTRY_PER_BLOCK)
2229 				break;
2230 
2231 			nid = i * NAT_ENTRY_PER_BLOCK + idx;
2232 			add_free_nid(sbi, nid, true, false);
2233 
2234 			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
2235 				goto out;
2236 		}
2237 	}
2238 out:
2239 	scan_curseg_cache(sbi);
2240 
2241 	up_read(&nm_i->nat_tree_lock);
2242 }
2243 
2244 static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
2245 						bool sync, bool mount)
2246 {
2247 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2248 	int i = 0, ret;
2249 	nid_t nid = nm_i->next_scan_nid;
2250 
2251 	if (unlikely(nid >= nm_i->max_nid))
2252 		nid = 0;
2253 
2254 	/* Enough entries */
2255 	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2256 		return 0;
2257 
2258 	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
2259 		return 0;
2260 
2261 	if (!mount) {
2262 		/* try to find free nids in free_nid_bitmap */
2263 		scan_free_nid_bits(sbi);
2264 
2265 		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2266 			return 0;
2267 	}
2268 
2269 	/* readahead nat pages to be scanned */
2270 	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
2271 							META_NAT, true);
2272 
2273 	down_read(&nm_i->nat_tree_lock);
2274 
2275 	while (1) {
2276 		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
2277 						nm_i->nat_block_bitmap)) {
2278 			struct page *page = get_current_nat_page(sbi, nid);
2279 
2280 			if (IS_ERR(page)) {
2281 				ret = PTR_ERR(page);
2282 			} else {
2283 				ret = scan_nat_page(sbi, page, nid);
2284 				f2fs_put_page(page, 1);
2285 			}
2286 
2287 			if (ret) {
2288 				up_read(&nm_i->nat_tree_lock);
2289 				f2fs_bug_on(sbi, !mount);
2290 				f2fs_msg(sbi->sb, KERN_ERR,
2291 					"NAT is corrupt, run fsck to fix it");
2292 				return ret;
2293 			}
2294 		}
2295 
2296 		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
2297 		if (unlikely(nid >= nm_i->max_nid))
2298 			nid = 0;
2299 
2300 		if (++i >= FREE_NID_PAGES)
2301 			break;
2302 	}
2303 
2304 	/* record where to resume so the next scan finds free nids abundantly */
2305 	nm_i->next_scan_nid = nid;
2306 
2307 	/* find free nids from current sum_pages */
2308 	scan_curseg_cache(sbi);
2309 
2310 	up_read(&nm_i->nat_tree_lock);
2311 
2312 	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2313 					nm_i->ra_nid_pages, META_NAT, false);
2314 
2315 	return 0;
2316 }
2317 
2318 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
2319 {
2320 	int ret;
2321 
2322 	mutex_lock(&NM_I(sbi)->build_lock);
2323 	ret = __f2fs_build_free_nids(sbi, sync, mount);
2324 	mutex_unlock(&NM_I(sbi)->build_lock);
2325 
2326 	return ret;
2327 }
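
/*
 * Parameter note: 'sync' forces a scan even when free-nid memory is
 * considered tight (f2fs_alloc_nid() relies on this), while 'mount'
 * is set only for the initial scan from f2fs_build_node_manager(),
 * where a corrupted NAT must fail the mount rather than trip the
 * f2fs_bug_on() in __f2fs_build_free_nids().
 */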
2328 
2329 /*
2330  * If this function returns success, the caller can obtain a new nid
2331  * from the second parameter of this function.
2332  * The returned nid can be used as an ino as well as a nid when an inode is created.
2333  */
2334 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
2335 {
2336 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2337 	struct free_nid *i = NULL;
2338 retry:
2339 	if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
2340 		f2fs_show_injection_info(FAULT_ALLOC_NID);
2341 		return false;
2342 	}
2343 
2344 	spin_lock(&nm_i->nid_list_lock);
2345 
2346 	if (unlikely(nm_i->available_nids == 0)) {
2347 		spin_unlock(&nm_i->nid_list_lock);
2348 		return false;
2349 	}
2350 
2351 	/* We should not use stale free nids created by f2fs_build_free_nids */
2352 	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
2353 		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
2354 		i = list_first_entry(&nm_i->free_nid_list,
2355 					struct free_nid, list);
2356 		*nid = i->nid;
2357 
2358 		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
2359 		nm_i->available_nids--;
2360 
2361 		update_free_nid_bitmap(sbi, *nid, false, false);
2362 
2363 		spin_unlock(&nm_i->nid_list_lock);
2364 		return true;
2365 	}
2366 	spin_unlock(&nm_i->nid_list_lock);
2367 
2368 	/* Let's scan the NAT pages and their caches to get free nids */
2369 	if (!f2fs_build_free_nids(sbi, true, false))
2370 		goto retry;
2371 	return false;
2372 }
2373 
2374 /*
2375  * f2fs_alloc_nid() should be called prior to this function.
2376  */
2377 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
2378 {
2379 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2380 	struct free_nid *i;
2381 
2382 	spin_lock(&nm_i->nid_list_lock);
2383 	i = __lookup_free_nid_list(nm_i, nid);
2384 	f2fs_bug_on(sbi, !i);
2385 	__remove_free_nid(sbi, i, PREALLOC_NID);
2386 	spin_unlock(&nm_i->nid_list_lock);
2387 
2388 	kmem_cache_free(free_nid_slab, i);
2389 }
2390 
2391 /*
2392  * f2fs_alloc_nid() should be called prior to this function.
2393  */
2394 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2395 {
2396 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2397 	struct free_nid *i;
2398 	bool need_free = false;
2399 
2400 	if (!nid)
2401 		return;
2402 
2403 	spin_lock(&nm_i->nid_list_lock);
2404 	i = __lookup_free_nid_list(nm_i, nid);
2405 	f2fs_bug_on(sbi, !i);
2406 
2407 	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
2408 		__remove_free_nid(sbi, i, PREALLOC_NID);
2409 		need_free = true;
2410 	} else {
2411 		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
2412 	}
2413 
2414 	nm_i->available_nids++;
2415 
2416 	update_free_nid_bitmap(sbi, nid, true, false);
2417 
2418 	spin_unlock(&nm_i->nid_list_lock);
2419 
2420 	if (need_free)
2421 		kmem_cache_free(free_nid_slab, i);
2422 }
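
/*
 * Usage sketch: a hypothetical helper (not called anywhere) showing
 * the canonical pairing of the three functions above, mirroring
 * f2fs_recover_xattr_data() below; offset 0 is assumed to denote the
 * inode's own node page.
 */
static int __maybe_unused example_alloc_node_page(struct f2fs_sb_info *sbi,
						struct inode *inode)
{
	struct dnode_of_data dn;
	struct page *npage;
	nid_t nid;

	if (!f2fs_alloc_nid(sbi, &nid))		/* FREE_NID -> PREALLOC_NID */
		return -ENOSPC;

	set_new_dnode(&dn, inode, NULL, NULL, nid);
	npage = f2fs_new_node_page(&dn, 0);
	if (IS_ERR(npage)) {
		f2fs_alloc_nid_failed(sbi, nid);	/* back to FREE_NID */
		return PTR_ERR(npage);
	}
	f2fs_alloc_nid_done(sbi, nid);		/* drops the PREALLOC_NID entry */
	f2fs_put_page(npage, 1);
	return 0;
}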
2423 
2424 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
2425 {
2426 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2427 	struct free_nid *i, *next;
2428 	int nr = nr_shrink;
2429 
2430 	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2431 		return 0;
2432 
2433 	if (!mutex_trylock(&nm_i->build_lock))
2434 		return 0;
2435 
2436 	spin_lock(&nm_i->nid_list_lock);
2437 	list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2438 		if (nr_shrink <= 0 ||
2439 				nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2440 			break;
2441 
2442 		__remove_free_nid(sbi, i, FREE_NID);
2443 		kmem_cache_free(free_nid_slab, i);
2444 		nr_shrink--;
2445 	}
2446 	spin_unlock(&nm_i->nid_list_lock);
2447 	mutex_unlock(&nm_i->build_lock);
2448 
2449 	return nr - nr_shrink;
2450 }
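
/*
 * Contract sketch: a caller such as the f2fs shrinker passes how many
 * entries it wants reclaimed and receives how many FREE_NID entries
 * were actually freed, e.g.
 *
 *	freed = f2fs_try_to_free_nids(sbi, 16);
 *
 * Nothing is reclaimed while the pool is at or below MAX_FREE_NIDS or
 * while another thread holds build_lock.
 */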
2451 
2452 void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
2453 {
2454 	void *src_addr, *dst_addr;
2455 	size_t inline_size;
2456 	struct page *ipage;
2457 	struct f2fs_inode *ri;
2458 
2459 	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
2460 	f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
2461 
2462 	ri = F2FS_INODE(page);
2463 	if (ri->i_inline & F2FS_INLINE_XATTR) {
2464 		set_inode_flag(inode, FI_INLINE_XATTR);
2465 	} else {
2466 		clear_inode_flag(inode, FI_INLINE_XATTR);
2467 		goto update_inode;
2468 	}
2469 
2470 	dst_addr = inline_xattr_addr(inode, ipage);
2471 	src_addr = inline_xattr_addr(inode, page);
2472 	inline_size = inline_xattr_size(inode);
2473 
2474 	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
2475 	memcpy(dst_addr, src_addr, inline_size);
2476 update_inode:
2477 	f2fs_update_inode(inode, ipage);
2478 	f2fs_put_page(ipage, 1);
2479 }
2480 
2481 int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
2482 {
2483 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2484 	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
2485 	nid_t new_xnid;
2486 	struct dnode_of_data dn;
2487 	struct node_info ni;
2488 	struct page *xpage;
2489 	int err;
2490 
2491 	if (!prev_xnid)
2492 		goto recover_xnid;
2493 
2494 	/* 1: invalidate the previous xattr nid */
2495 	err = f2fs_get_node_info(sbi, prev_xnid, &ni);
2496 	if (err)
2497 		return err;
2498 
2499 	f2fs_invalidate_blocks(sbi, ni.blk_addr);
2500 	dec_valid_node_count(sbi, inode, false);
2501 	set_node_addr(sbi, &ni, NULL_ADDR, false);
2502 
2503 recover_xnid:
2504 	/* 2: update xattr nid in inode */
2505 	if (!f2fs_alloc_nid(sbi, &new_xnid))
2506 		return -ENOSPC;
2507 
2508 	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
2509 	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
2510 	if (IS_ERR(xpage)) {
2511 		f2fs_alloc_nid_failed(sbi, new_xnid);
2512 		return PTR_ERR(xpage);
2513 	}
2514 
2515 	f2fs_alloc_nid_done(sbi, new_xnid);
2516 	f2fs_update_inode_page(inode);
2517 
2518 	/* 3: update and set xattr node page dirty */
2519 	memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
2520 
2521 	set_page_dirty(xpage);
2522 	f2fs_put_page(xpage, 1);
2523 
2524 	return 0;
2525 }
2526 
2527 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
2528 {
2529 	struct f2fs_inode *src, *dst;
2530 	nid_t ino = ino_of_node(page);
2531 	struct node_info old_ni, new_ni;
2532 	struct page *ipage;
2533 	int err;
2534 
2535 	err = f2fs_get_node_info(sbi, ino, &old_ni);
2536 	if (err)
2537 		return err;
2538 
2539 	if (unlikely(old_ni.blk_addr != NULL_ADDR))
2540 		return -EINVAL;
2541 retry:
2542 	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2543 	if (!ipage) {
2544 		congestion_wait(BLK_RW_ASYNC, HZ/50);
2545 		goto retry;
2546 	}
2547 
2548 	/* Make sure this ino is not handed out from the free nid list */
2549 	remove_free_nid(sbi, ino);
2550 
2551 	if (!PageUptodate(ipage))
2552 		SetPageUptodate(ipage);
2553 	fill_node_footer(ipage, ino, ino, 0, true);
2554 	set_cold_node(ipage, false);
2555 
2556 	src = F2FS_INODE(page);
2557 	dst = F2FS_INODE(ipage);
2558 
2559 	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
2560 	dst->i_size = 0;
2561 	dst->i_blocks = cpu_to_le64(1);
2562 	dst->i_links = cpu_to_le32(1);
2563 	dst->i_xattr_nid = 0;
2564 	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
2565 	if (dst->i_inline & F2FS_EXTRA_ATTR) {
2566 		dst->i_extra_isize = src->i_extra_isize;
2567 
2568 		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
2569 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2570 							i_inline_xattr_size))
2571 			dst->i_inline_xattr_size = src->i_inline_xattr_size;
2572 
2573 		if (f2fs_sb_has_project_quota(sbi) &&
2574 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2575 								i_projid))
2576 			dst->i_projid = src->i_projid;
2577 
2578 		if (f2fs_sb_has_inode_crtime(sbi) &&
2579 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2580 							i_crtime_nsec)) {
2581 			dst->i_crtime = src->i_crtime;
2582 			dst->i_crtime_nsec = src->i_crtime_nsec;
2583 		}
2584 	}
2585 
2586 	new_ni = old_ni;
2587 	new_ni.ino = ino;
2588 
2589 	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
2590 		WARN_ON(1);
2591 	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2592 	inc_valid_inode_count(sbi);
2593 	set_page_dirty(ipage);
2594 	f2fs_put_page(ipage, 1);
2595 	return 0;
2596 }
2597 
2598 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
2599 			unsigned int segno, struct f2fs_summary_block *sum)
2600 {
2601 	struct f2fs_node *rn;
2602 	struct f2fs_summary *sum_entry;
2603 	block_t addr;
2604 	int i, idx, last_offset, nrpages;
2605 
2606 	/* scan the node segment */
2607 	last_offset = sbi->blocks_per_seg;
2608 	addr = START_BLOCK(sbi, segno);
2609 	sum_entry = &sum->entries[0];
2610 
2611 	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2612 		nrpages = min(last_offset - i, BIO_MAX_PAGES);
2613 
2614 		/* readahead node pages */
2615 		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2616 
2617 		for (idx = addr; idx < addr + nrpages; idx++) {
2618 			struct page *page = f2fs_get_tmp_page(sbi, idx);
2619 
2620 			if (IS_ERR(page))
2621 				return PTR_ERR(page);
2622 
2623 			rn = F2FS_NODE(page);
2624 			sum_entry->nid = rn->footer.nid;
2625 			sum_entry->version = 0;
2626 			sum_entry->ofs_in_node = 0;
2627 			sum_entry++;
2628 			f2fs_put_page(page, 1);
2629 		}
2630 
2631 		invalidate_mapping_pages(META_MAPPING(sbi), addr,
2632 							addr + nrpages);
2633 	}
2634 	return 0;
2635 }
2636 
2637 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2638 {
2639 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2640 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2641 	struct f2fs_journal *journal = curseg->journal;
2642 	int i;
2643 
2644 	down_write(&curseg->journal_rwsem);
2645 	for (i = 0; i < nats_in_cursum(journal); i++) {
2646 		struct nat_entry *ne;
2647 		struct f2fs_nat_entry raw_ne;
2648 		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2649 
2650 		raw_ne = nat_in_journal(journal, i);
2651 
2652 		ne = __lookup_nat_cache(nm_i, nid);
2653 		if (!ne) {
2654 			ne = __alloc_nat_entry(nid, true);
2655 			__init_nat_entry(nm_i, ne, &raw_ne, true);
2656 		}
2657 
2658 		/*
2659 		 * if a free nat in the journal has not been used since the
2660 		 * last checkpoint, we should remove it from the available
2661 		 * nids, since we will add it again later.
2662 		 */
2663 		if (!get_nat_flag(ne, IS_DIRTY) &&
2664 				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
2665 			spin_lock(&nm_i->nid_list_lock);
2666 			nm_i->available_nids--;
2667 			spin_unlock(&nm_i->nid_list_lock);
2668 		}
2669 
2670 		__set_nat_cache_dirty(nm_i, ne);
2671 	}
2672 	update_nats_in_cursum(journal, -i);
2673 	up_write(&curseg->journal_rwsem);
2674 }
2675 
2676 static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2677 						struct list_head *head, int max)
2678 {
2679 	struct nat_entry_set *cur;
2680 
2681 	if (nes->entry_cnt >= max)
2682 		goto add_out;
2683 
2684 	list_for_each_entry(cur, head, set_list) {
2685 		if (cur->entry_cnt >= nes->entry_cnt) {
2686 			list_add(&nes->set_list, cur->set_list.prev);
2687 			return;
2688 		}
2689 	}
2690 add_out:
2691 	list_add_tail(&nes->set_list, head);
2692 }
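
/*
 * Ordering note: the list built here stays sorted by ascending
 * entry_cnt, with sets too big for the journal appended at the tail,
 * so the smallest sets get first claim on journal space in
 * __flush_nat_entry_set().
 */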
2693 
2694 static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
2695 						struct page *page)
2696 {
2697 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2698 	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
2699 	struct f2fs_nat_block *nat_blk = page_address(page);
2700 	int valid = 0;
2701 	int i = 0;
2702 
2703 	if (!enabled_nat_bits(sbi, NULL))
2704 		return;
2705 
2706 	if (nat_index == 0) {
2707 		valid = 1;
2708 		i = 1;
2709 	}
2710 	for (; i < NAT_ENTRY_PER_BLOCK; i++) {
2711 		if (nat_blk->entries[i].block_addr != NULL_ADDR)
2712 			valid++;
2713 	}
2714 	if (valid == 0) {
2715 		__set_bit_le(nat_index, nm_i->empty_nat_bits);
2716 		__clear_bit_le(nat_index, nm_i->full_nat_bits);
2717 		return;
2718 	}
2719 
2720 	__clear_bit_le(nat_index, nm_i->empty_nat_bits);
2721 	if (valid == NAT_ENTRY_PER_BLOCK)
2722 		__set_bit_le(nat_index, nm_i->full_nat_bits);
2723 	else
2724 		__clear_bit_le(nat_index, nm_i->full_nat_bits);
2725 }
2726 
2727 static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2728 		struct nat_entry_set *set, struct cp_control *cpc)
2729 {
2730 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2731 	struct f2fs_journal *journal = curseg->journal;
2732 	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
2733 	bool to_journal = true;
2734 	struct f2fs_nat_block *nat_blk;
2735 	struct nat_entry *ne, *cur;
2736 	struct page *page = NULL;
2737 
2738 	/*
2739 	 * a nat entry set is flushed in one of two ways:
2740 	 * #1, to the journal in the current hot data summary block, or
2741 	 * #2, to its nat page.
2742 	 */
2743 	if (enabled_nat_bits(sbi, cpc) ||
2744 		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
2745 		to_journal = false;
2746 
2747 	if (to_journal) {
2748 		down_write(&curseg->journal_rwsem);
2749 	} else {
2750 		page = get_next_nat_page(sbi, start_nid);
2751 		if (IS_ERR(page))
2752 			return PTR_ERR(page);
2753 
2754 		nat_blk = page_address(page);
2755 		f2fs_bug_on(sbi, !nat_blk);
2756 	}
2757 
2758 	/* flush dirty nats in nat entry set */
2759 	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
2760 		struct f2fs_nat_entry *raw_ne;
2761 		nid_t nid = nat_get_nid(ne);
2762 		int offset;
2763 
2764 		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
2765 
2766 		if (to_journal) {
2767 			offset = f2fs_lookup_journal_in_cursum(journal,
2768 							NAT_JOURNAL, nid, 1);
2769 			f2fs_bug_on(sbi, offset < 0);
2770 			raw_ne = &nat_in_journal(journal, offset);
2771 			nid_in_journal(journal, offset) = cpu_to_le32(nid);
2772 		} else {
2773 			raw_ne = &nat_blk->entries[nid - start_nid];
2774 		}
2775 		raw_nat_from_node_info(raw_ne, &ne->ni);
2776 		nat_reset_flag(ne);
2777 		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
2778 		if (nat_get_blkaddr(ne) == NULL_ADDR) {
2779 			add_free_nid(sbi, nid, false, true);
2780 		} else {
2781 			spin_lock(&NM_I(sbi)->nid_list_lock);
2782 			update_free_nid_bitmap(sbi, nid, false, false);
2783 			spin_unlock(&NM_I(sbi)->nid_list_lock);
2784 		}
2785 	}
2786 
2787 	if (to_journal) {
2788 		up_write(&curseg->journal_rwsem);
2789 	} else {
2790 		__update_nat_bits(sbi, start_nid, page);
2791 		f2fs_put_page(page, 1);
2792 	}
2793 
2794 	/* Allow dirty nats by node block allocation in write_begin */
2795 	if (!set->entry_cnt) {
2796 		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
2797 		kmem_cache_free(nat_entry_set_slab, set);
2798 	}
2799 	return 0;
2800 }
2801 
2802 /*
2803  * This function is called during the checkpointing process.
2804  */
2805 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
2806 {
2807 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2808 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2809 	struct f2fs_journal *journal = curseg->journal;
2810 	struct nat_entry_set *setvec[SETVEC_SIZE];
2811 	struct nat_entry_set *set, *tmp;
2812 	unsigned int found;
2813 	nid_t set_idx = 0;
2814 	LIST_HEAD(sets);
2815 	int err = 0;
2816 
2817 	/* during unmount, let's flush nat_bits before checking dirty_nat_cnt */
2818 	if (enabled_nat_bits(sbi, cpc)) {
2819 		down_write(&nm_i->nat_tree_lock);
2820 		remove_nats_in_journal(sbi);
2821 		up_write(&nm_i->nat_tree_lock);
2822 	}
2823 
2824 	if (!nm_i->dirty_nat_cnt)
2825 		return 0;
2826 
2827 	down_write(&nm_i->nat_tree_lock);
2828 
2829 	/*
2830 	 * if there is not enough space in the journal to store the dirty
2831 	 * nat entries, remove all entries from the journal and merge them
2832 	 * into the nat entry sets.
2833 	 */
2834 	if (enabled_nat_bits(sbi, cpc) ||
2835 		!__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
2836 		remove_nats_in_journal(sbi);
2837 
2838 	while ((found = __gang_lookup_nat_set(nm_i,
2839 					set_idx, SETVEC_SIZE, setvec))) {
2840 		unsigned idx;
2841 		set_idx = setvec[found - 1]->set + 1;
2842 		for (idx = 0; idx < found; idx++)
2843 			__adjust_nat_entry_set(setvec[idx], &sets,
2844 						MAX_NAT_JENTRIES(journal));
2845 	}
2846 
2847 	/* flush dirty nats in nat entry set */
2848 	list_for_each_entry_safe(set, tmp, &sets, set_list) {
2849 		err = __flush_nat_entry_set(sbi, set, cpc);
2850 		if (err)
2851 			break;
2852 	}
2853 
2854 	up_write(&nm_i->nat_tree_lock);
2855 	/* Allow dirty nats by node block allocation in write_begin */
2856 
2857 	return err;
2858 }
2859 
2860 static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
2861 {
2862 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2863 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2864 	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
2865 	unsigned int i;
2866 	__u64 cp_ver = cur_cp_version(ckpt);
2867 	block_t nat_bits_addr;
2868 
2869 	if (!enabled_nat_bits(sbi, NULL))
2870 		return 0;
2871 
2872 	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
2873 	nm_i->nat_bits = f2fs_kzalloc(sbi,
2874 			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
2875 	if (!nm_i->nat_bits)
2876 		return -ENOMEM;
2877 
2878 	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
2879 						nm_i->nat_bits_blocks;
2880 	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
2881 		struct page *page;
2882 
2883 		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
2884 		if (IS_ERR(page))
2885 			return PTR_ERR(page);
2886 
2887 		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
2888 					page_address(page), F2FS_BLKSIZE);
2889 		f2fs_put_page(page, 1);
2890 	}
2891 
2892 	cp_ver |= (cur_cp_crc(ckpt) << 32);
2893 	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
2894 		disable_nat_bits(sbi, true);
2895 		return 0;
2896 	}
2897 
2898 	nm_i->full_nat_bits = nm_i->nat_bits + 8;
2899 	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
2900 
2901 	f2fs_msg(sbi->sb, KERN_NOTICE, "Found nat_bits in checkpoint");
2902 	return 0;
2903 }
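
/*
 * Layout sketch of the nat_bits area loaded above, as implied by the
 * offsets used in this file:
 *
 *	+-----------------+----------------+-----------------+
 *	| cp_ver + crc 8B | full_nat_bits  | empty_nat_bits  |
 *	+-----------------+----------------+-----------------+
 *	                   nat_bits_bytes    nat_bits_bytes
 *
 * A set bit in full_nat_bits means every entry of that NAT block is
 * in use; a set bit in empty_nat_bits means none are.
 */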
2904 
2905 static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
2906 {
2907 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2908 	unsigned int i = 0;
2909 	nid_t nid, last_nid;
2910 
2911 	if (!enabled_nat_bits(sbi, NULL))
2912 		return;
2913 
2914 	for (i = 0; i < nm_i->nat_blocks; i++) {
2915 		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
2916 		if (i >= nm_i->nat_blocks)
2917 			break;
2918 
2919 		__set_bit_le(i, nm_i->nat_block_bitmap);
2920 
2921 		nid = i * NAT_ENTRY_PER_BLOCK;
2922 		last_nid = nid + NAT_ENTRY_PER_BLOCK;
2923 
2924 		spin_lock(&NM_I(sbi)->nid_list_lock);
2925 		for (; nid < last_nid; nid++)
2926 			update_free_nid_bitmap(sbi, nid, true, true);
2927 		spin_unlock(&NM_I(sbi)->nid_list_lock);
2928 	}
2929 
2930 	for (i = 0; i < nm_i->nat_blocks; i++) {
2931 		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
2932 		if (i >= nm_i->nat_blocks)
2933 			break;
2934 
2935 		__set_bit_le(i, nm_i->nat_block_bitmap);
2936 	}
2937 }
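
/*
 * Note: NAT blocks flagged empty get every nid pre-marked free, while
 * blocks flagged full are only marked as scanned (they contribute no
 * free nids); partially used blocks are left for scan_nat_page().
 */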
2938 
2939 static int init_node_manager(struct f2fs_sb_info *sbi)
2940 {
2941 	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
2942 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2943 	unsigned char *version_bitmap;
2944 	unsigned int nat_segs;
2945 	int err;
2946 
2947 	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
2948 
2949 	/* segment_count_nat includes both segments of each pair, so divide by 2. */
2950 	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
2951 	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
2952 	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
2953 
2954 	/* unused nids: 0, node, meta (and root, which is counted as a valid node) */
2955 	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
2956 				sbi->nquota_files - F2FS_RESERVED_NODE_NUM;
2957 	nm_i->nid_cnt[FREE_NID] = 0;
2958 	nm_i->nid_cnt[PREALLOC_NID] = 0;
2959 	nm_i->nat_cnt = 0;
2960 	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
2961 	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
2962 	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
2963 
2964 	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
2965 	INIT_LIST_HEAD(&nm_i->free_nid_list);
2966 	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
2967 	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
2968 	INIT_LIST_HEAD(&nm_i->nat_entries);
2969 	spin_lock_init(&nm_i->nat_list_lock);
2970 
2971 	mutex_init(&nm_i->build_lock);
2972 	spin_lock_init(&nm_i->nid_list_lock);
2973 	init_rwsem(&nm_i->nat_tree_lock);
2974 
2975 	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
2976 	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
2977 	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
2978 	if (!version_bitmap)
2979 		return -EFAULT;
2980 
2981 	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
2982 					GFP_KERNEL);
2983 	if (!nm_i->nat_bitmap)
2984 		return -ENOMEM;
2985 
2986 	err = __get_nat_bitmaps(sbi);
2987 	if (err)
2988 		return err;
2989 
2990 #ifdef CONFIG_F2FS_CHECK_FS
2991 	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
2992 					GFP_KERNEL);
2993 	if (!nm_i->nat_bitmap_mir)
2994 		return -ENOMEM;
2995 #endif
2996 
2997 	return 0;
2998 }
2999 
3000 static int init_free_nid_cache(struct f2fs_sb_info *sbi)
3001 {
3002 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3003 	int i;
3004 
3005 	nm_i->free_nid_bitmap =
3006 		f2fs_kzalloc(sbi, array_size(sizeof(unsigned char *),
3007 					     nm_i->nat_blocks),
3008 			     GFP_KERNEL);
3009 	if (!nm_i->free_nid_bitmap)
3010 		return -ENOMEM;
3011 
3012 	for (i = 0; i < nm_i->nat_blocks; i++) {
3013 		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
3014 			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
3015 		if (!nm_i->free_nid_bitmap[i])
3016 			return -ENOMEM;
3017 	}
3018 
3019 	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
3020 								GFP_KERNEL);
3021 	if (!nm_i->nat_block_bitmap)
3022 		return -ENOMEM;
3023 
3024 	nm_i->free_nid_count =
3025 		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
3026 					      nm_i->nat_blocks),
3027 			      GFP_KERNEL);
3028 	if (!nm_i->free_nid_count)
3029 		return -ENOMEM;
3030 	return 0;
3031 }
3032 
3033 int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
3034 {
3035 	int err;
3036 
3037 	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
3038 							GFP_KERNEL);
3039 	if (!sbi->nm_info)
3040 		return -ENOMEM;
3041 
3042 	err = init_node_manager(sbi);
3043 	if (err)
3044 		return err;
3045 
3046 	err = init_free_nid_cache(sbi);
3047 	if (err)
3048 		return err;
3049 
3050 	/* load free nid status from nat_bits table */
3051 	load_free_nid_bitmap(sbi);
3052 
3053 	return f2fs_build_free_nids(sbi, true, true);
3054 }
3055 
3056 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
3057 {
3058 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3059 	struct free_nid *i, *next_i;
3060 	struct nat_entry *natvec[NATVEC_SIZE];
3061 	struct nat_entry_set *setvec[SETVEC_SIZE];
3062 	nid_t nid = 0;
3063 	unsigned int found;
3064 
3065 	if (!nm_i)
3066 		return;
3067 
3068 	/* destroy free nid list */
3069 	spin_lock(&nm_i->nid_list_lock);
3070 	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
3071 		__remove_free_nid(sbi, i, FREE_NID);
3072 		spin_unlock(&nm_i->nid_list_lock);
3073 		kmem_cache_free(free_nid_slab, i);
3074 		spin_lock(&nm_i->nid_list_lock);
3075 	}
3076 	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
3077 	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
3078 	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
3079 	spin_unlock(&nm_i->nid_list_lock);
3080 
3081 	/* destroy nat cache */
3082 	down_write(&nm_i->nat_tree_lock);
3083 	while ((found = __gang_lookup_nat_cache(nm_i,
3084 					nid, NATVEC_SIZE, natvec))) {
3085 		unsigned idx;
3086 
3087 		nid = nat_get_nid(natvec[found - 1]) + 1;
3088 		for (idx = 0; idx < found; idx++) {
3089 			spin_lock(&nm_i->nat_list_lock);
3090 			list_del(&natvec[idx]->list);
3091 			spin_unlock(&nm_i->nat_list_lock);
3092 
3093 			__del_from_nat_cache(nm_i, natvec[idx]);
3094 		}
3095 	}
3096 	f2fs_bug_on(sbi, nm_i->nat_cnt);
3097 
3098 	/* destroy nat set cache */
3099 	nid = 0;
3100 	while ((found = __gang_lookup_nat_set(nm_i,
3101 					nid, SETVEC_SIZE, setvec))) {
3102 		unsigned idx;
3103 
3104 		nid = setvec[found - 1]->set + 1;
3105 		for (idx = 0; idx < found; idx++) {
3106 			/* entry_cnt is nonzero when cp_error occurred */
3107 			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
3108 			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
3109 			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
3110 		}
3111 	}
3112 	up_write(&nm_i->nat_tree_lock);
3113 
3114 	kvfree(nm_i->nat_block_bitmap);
3115 	if (nm_i->free_nid_bitmap) {
3116 		int i;
3117 
3118 		for (i = 0; i < nm_i->nat_blocks; i++)
3119 			kvfree(nm_i->free_nid_bitmap[i]);
3120 		kvfree(nm_i->free_nid_bitmap);
3121 	}
3122 	kvfree(nm_i->free_nid_count);
3123 
3124 	kvfree(nm_i->nat_bitmap);
3125 	kvfree(nm_i->nat_bits);
3126 #ifdef CONFIG_F2FS_CHECK_FS
3127 	kvfree(nm_i->nat_bitmap_mir);
3128 #endif
3129 	sbi->nm_info = NULL;
3130 	kvfree(nm_i);
3131 }
3132 
3133 int __init f2fs_create_node_manager_caches(void)
3134 {
3135 	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
3136 			sizeof(struct nat_entry));
3137 	if (!nat_entry_slab)
3138 		goto fail;
3139 
3140 	free_nid_slab = f2fs_kmem_cache_create("free_nid",
3141 			sizeof(struct free_nid));
3142 	if (!free_nid_slab)
3143 		goto destroy_nat_entry;
3144 
3145 	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
3146 			sizeof(struct nat_entry_set));
3147 	if (!nat_entry_set_slab)
3148 		goto destroy_free_nid;
3149 
3150 	fsync_node_entry_slab = f2fs_kmem_cache_create("fsync_node_entry",
3151 			sizeof(struct fsync_node_entry));
3152 	if (!fsync_node_entry_slab)
3153 		goto destroy_nat_entry_set;
3154 	return 0;
3155 
3156 destroy_nat_entry_set:
3157 	kmem_cache_destroy(nat_entry_set_slab);
3158 destroy_free_nid:
3159 	kmem_cache_destroy(free_nid_slab);
3160 destroy_nat_entry:
3161 	kmem_cache_destroy(nat_entry_slab);
3162 fail:
3163 	return -ENOMEM;
3164 }
3165 
3166 void f2fs_destroy_node_manager_caches(void)
3167 {
3168 	kmem_cache_destroy(fsync_node_entry_slab);
3169 	kmem_cache_destroy(nat_entry_set_slab);
3170 	kmem_cache_destroy(free_nid_slab);
3171 	kmem_cache_destroy(nat_entry_slab);
3172 }
3173