1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * fs/f2fs/node.c
4 *
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/mpage.h>
11 #include <linux/sched/mm.h>
12 #include <linux/blkdev.h>
13 #include <linux/pagevec.h>
14 #include <linux/swap.h>
15
16 #include "f2fs.h"
17 #include "node.h"
18 #include "segment.h"
19 #include "xattr.h"
20 #include "iostat.h"
21 #include <trace/events/f2fs.h>
22
23 #define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)
24
25 static struct kmem_cache *nat_entry_slab;
26 static struct kmem_cache *free_nid_slab;
27 static struct kmem_cache *nat_entry_set_slab;
28 static struct kmem_cache *fsync_node_entry_slab;
29
static inline bool is_invalid_nid(struct f2fs_sb_info *sbi, nid_t nid)
31 {
32 return nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid;
33 }
34
35 /*
36 * Check whether the given nid is within node id range.
37 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
39 {
40 if (unlikely(is_invalid_nid(sbi, nid))) {
41 set_sbi_flag(sbi, SBI_NEED_FSCK);
42 f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
43 __func__, nid);
44 f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
45 return -EFSCORRUPTED;
46 }
47 return 0;
48 }
49
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
51 {
52 struct f2fs_nm_info *nm_i = NM_I(sbi);
53 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
54 struct sysinfo val;
55 unsigned long avail_ram;
56 unsigned long mem_size = 0;
57 bool res = false;
58
59 if (!nm_i)
60 return true;
61
62 si_meminfo(&val);
63
64 /* only uses low memory */
65 avail_ram = val.totalram - val.totalhigh;
66
/*
 * give 25%, 25%, 50%, 50%, 25%, 25% of memory to each component, respectively
 */
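/*
 * For example, if ram_thresh is set to 10, the FREE_NIDS and NAT_ENTRIES
 * caches are each considered over budget once they grow past
 * (avail_ram * 10 / 100) >> 2 pages, i.e. 2.5% of low memory.
 */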
70 if (type == FREE_NIDS) {
71 mem_size = (nm_i->nid_cnt[FREE_NID] *
72 sizeof(struct free_nid)) >> PAGE_SHIFT;
73 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
74 } else if (type == NAT_ENTRIES) {
75 mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
76 sizeof(struct nat_entry)) >> PAGE_SHIFT;
77 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
78 if (excess_cached_nats(sbi))
79 res = false;
80 } else if (type == DIRTY_DENTS) {
81 if (sbi->sb->s_bdi->wb.dirty_exceeded)
82 return false;
83 mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
84 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
85 } else if (type == INO_ENTRIES) {
86 int i;
87
88 for (i = 0; i < MAX_INO_ENTRY; i++)
89 mem_size += sbi->im[i].ino_num *
90 sizeof(struct ino_entry);
91 mem_size >>= PAGE_SHIFT;
92 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
93 } else if (type == READ_EXTENT_CACHE || type == AGE_EXTENT_CACHE) {
94 enum extent_type etype = type == READ_EXTENT_CACHE ?
95 EX_READ : EX_BLOCK_AGE;
96 struct extent_tree_info *eti = &sbi->extent_tree[etype];
97
98 mem_size = (atomic_read(&eti->total_ext_tree) *
99 sizeof(struct extent_tree) +
100 atomic_read(&eti->total_ext_node) *
101 sizeof(struct extent_node)) >> PAGE_SHIFT;
102 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
103 } else if (type == DISCARD_CACHE) {
104 mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
105 sizeof(struct discard_cmd)) >> PAGE_SHIFT;
106 res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
107 } else if (type == COMPRESS_PAGE) {
108 #ifdef CONFIG_F2FS_FS_COMPRESSION
109 unsigned long free_ram = val.freeram;
110
/*
 * Deny caching a compressed page when free memory falls below the
 * watermark or the cached page count exceeds the threshold.
 */
115 res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
116 (COMPRESS_MAPPING(sbi)->nrpages <
117 free_ram * sbi->compress_percent / 100);
118 #else
119 res = false;
120 #endif
121 } else {
122 if (!sbi->sb->s_bdi->wb.dirty_exceeded)
123 return true;
124 }
125 return res;
126 }
127
static void clear_node_folio_dirty(struct folio *folio)
129 {
130 if (folio_test_dirty(folio)) {
131 f2fs_clear_page_cache_dirty_tag(folio);
132 folio_clear_dirty_for_io(folio);
133 dec_page_count(F2FS_F_SB(folio), F2FS_DIRTY_NODES);
134 }
135 folio_clear_uptodate(folio);
136 }
137
static struct folio *get_current_nat_folio(struct f2fs_sb_info *sbi, nid_t nid)
139 {
140 return f2fs_get_meta_folio_retry(sbi, current_nat_addr(sbi, nid));
141 }
142
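/*
 * Copy the current NAT block for @nid into its alternate (next) location
 * and flip the corresponding NAT bitmap bit, so later lookups and the next
 * checkpoint use the new copy. Returns the dirtied destination folio.
 */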
static struct folio *get_next_nat_folio(struct f2fs_sb_info *sbi, nid_t nid)
144 {
145 struct folio *src_folio;
146 struct folio *dst_folio;
147 pgoff_t dst_off;
148 void *src_addr;
149 void *dst_addr;
150 struct f2fs_nm_info *nm_i = NM_I(sbi);
151
152 dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
153
154 /* get current nat block page with lock */
155 src_folio = get_current_nat_folio(sbi, nid);
156 if (IS_ERR(src_folio))
157 return src_folio;
158 dst_folio = f2fs_grab_meta_folio(sbi, dst_off);
159 f2fs_bug_on(sbi, folio_test_dirty(src_folio));
160
161 src_addr = folio_address(src_folio);
162 dst_addr = folio_address(dst_folio);
163 memcpy(dst_addr, src_addr, PAGE_SIZE);
164 folio_mark_dirty(dst_folio);
165 f2fs_folio_put(src_folio, true);
166
167 set_to_next_nat(nm_i, nid);
168
169 return dst_folio;
170 }
171
static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
173 nid_t nid, bool no_fail)
174 {
175 struct nat_entry *new;
176
177 new = f2fs_kmem_cache_alloc(nat_entry_slab,
178 GFP_F2FS_ZERO, no_fail, sbi);
179 if (new) {
180 nat_set_nid(new, nid);
181 nat_reset_flag(new);
182 }
183 return new;
184 }
185
static void __free_nat_entry(struct nat_entry *e)
187 {
188 kmem_cache_free(nat_entry_slab, e);
189 }
190
191 /* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
193 struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail, bool init_dirty)
194 {
195 if (no_fail)
196 f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
197 else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
198 return NULL;
199
200 if (raw_ne)
201 node_info_from_raw_nat(&ne->ni, raw_ne);
202
203 if (init_dirty) {
204 INIT_LIST_HEAD(&ne->list);
205 nm_i->nat_cnt[TOTAL_NAT]++;
206 return ne;
207 }
208
209 spin_lock(&nm_i->nat_list_lock);
210 list_add_tail(&ne->list, &nm_i->nat_entries);
211 spin_unlock(&nm_i->nat_list_lock);
212
213 nm_i->nat_cnt[TOTAL_NAT]++;
214 nm_i->nat_cnt[RECLAIMABLE_NAT]++;
215 return ne;
216 }
217
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n, bool for_dirty)
219 {
220 struct nat_entry *ne;
221
222 ne = radix_tree_lookup(&nm_i->nat_root, n);
223
/*
 * For a recently accessed nat entry that will not be dirtied soon,
 * move it to the tail of the LRU list.
 */
228 if (ne && !get_nat_flag(ne, IS_DIRTY) && !for_dirty) {
229 spin_lock(&nm_i->nat_list_lock);
230 if (!list_empty(&ne->list))
231 list_move_tail(&ne->list, &nm_i->nat_entries);
232 spin_unlock(&nm_i->nat_list_lock);
233 }
234
235 return ne;
236 }
237
static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
239 nid_t start, unsigned int nr, struct nat_entry **ep)
240 {
241 return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
242 }
243
static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
245 {
246 radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
247 nm_i->nat_cnt[TOTAL_NAT]--;
248 nm_i->nat_cnt[RECLAIMABLE_NAT]--;
249 __free_nat_entry(e);
250 }
251
static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
253 struct nat_entry *ne)
254 {
255 nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
256 struct nat_entry_set *head;
257
258 head = radix_tree_lookup(&nm_i->nat_set_root, set);
259 if (!head) {
260 head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
261 GFP_NOFS, true, NULL);
262
263 INIT_LIST_HEAD(&head->entry_list);
264 INIT_LIST_HEAD(&head->set_list);
265 head->set = set;
266 head->entry_cnt = 0;
267 f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
268 }
269 return head;
270 }
271
static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
273 struct nat_entry *ne, bool init_dirty)
274 {
275 struct nat_entry_set *head;
276 bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;
277
278 if (!new_ne)
279 head = __grab_nat_entry_set(nm_i, ne);
280
/*
 * Update entry_cnt in the conditions below:
 * 1. a NEW_ADDR entry is updated to a valid block address;
 * 2. an old block address is updated to a new one.
 */
286 if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
287 !get_nat_flag(ne, IS_DIRTY)))
288 head->entry_cnt++;
289
290 set_nat_flag(ne, IS_PREALLOC, new_ne);
291
292 if (get_nat_flag(ne, IS_DIRTY))
293 goto refresh_list;
294
295 nm_i->nat_cnt[DIRTY_NAT]++;
296 if (!init_dirty)
297 nm_i->nat_cnt[RECLAIMABLE_NAT]--;
298 set_nat_flag(ne, IS_DIRTY, true);
299 refresh_list:
300 spin_lock(&nm_i->nat_list_lock);
301 if (new_ne)
302 list_del_init(&ne->list);
303 else
304 list_move_tail(&ne->list, &head->entry_list);
305 spin_unlock(&nm_i->nat_list_lock);
306 }
307
static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
309 struct nat_entry_set *set, struct nat_entry *ne)
310 {
311 spin_lock(&nm_i->nat_list_lock);
312 list_move_tail(&ne->list, &nm_i->nat_entries);
313 spin_unlock(&nm_i->nat_list_lock);
314
315 set_nat_flag(ne, IS_DIRTY, false);
316 set->entry_cnt--;
317 nm_i->nat_cnt[DIRTY_NAT]--;
318 nm_i->nat_cnt[RECLAIMABLE_NAT]++;
319 }
320
static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
322 nid_t start, unsigned int nr, struct nat_entry_set **ep)
323 {
324 return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
325 start, nr);
326 }
327
bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct folio *folio)
329 {
330 return is_node_folio(folio) && IS_DNODE(folio) && is_cold_node(folio);
331 }
332
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
334 {
335 spin_lock_init(&sbi->fsync_node_lock);
336 INIT_LIST_HEAD(&sbi->fsync_node_list);
337 sbi->fsync_seg_id = 0;
338 sbi->fsync_node_num = 0;
339 }
340
static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
342 struct folio *folio)
343 {
344 struct fsync_node_entry *fn;
345 unsigned long flags;
346 unsigned int seq_id;
347
348 fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
349 GFP_NOFS, true, NULL);
350
351 folio_get(folio);
352 fn->folio = folio;
353 INIT_LIST_HEAD(&fn->list);
354
355 spin_lock_irqsave(&sbi->fsync_node_lock, flags);
356 list_add_tail(&fn->list, &sbi->fsync_node_list);
357 fn->seq_id = sbi->fsync_seg_id++;
358 seq_id = fn->seq_id;
359 sbi->fsync_node_num++;
360 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
361
362 return seq_id;
363 }
364
void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct folio *folio)
366 {
367 struct fsync_node_entry *fn;
368 unsigned long flags;
369
370 spin_lock_irqsave(&sbi->fsync_node_lock, flags);
371 list_for_each_entry(fn, &sbi->fsync_node_list, list) {
372 if (fn->folio == folio) {
373 list_del(&fn->list);
374 sbi->fsync_node_num--;
375 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
376 kmem_cache_free(fsync_node_entry_slab, fn);
377 folio_put(folio);
378 return;
379 }
380 }
381 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
382 f2fs_bug_on(sbi, 1);
383 }
384
void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
386 {
387 unsigned long flags;
388
389 spin_lock_irqsave(&sbi->fsync_node_lock, flags);
390 sbi->fsync_seg_id = 0;
391 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
392 }
393
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
395 {
396 struct f2fs_nm_info *nm_i = NM_I(sbi);
397 struct nat_entry *e;
398 bool need = false;
399
400 f2fs_down_read(&nm_i->nat_tree_lock);
401 e = __lookup_nat_cache(nm_i, nid, false);
402 if (e) {
403 if (!get_nat_flag(e, IS_CHECKPOINTED) &&
404 !get_nat_flag(e, HAS_FSYNCED_INODE))
405 need = true;
406 }
407 f2fs_up_read(&nm_i->nat_tree_lock);
408 return need;
409 }
410
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
412 {
413 struct f2fs_nm_info *nm_i = NM_I(sbi);
414 struct nat_entry *e;
415 bool is_cp = true;
416
417 f2fs_down_read(&nm_i->nat_tree_lock);
418 e = __lookup_nat_cache(nm_i, nid, false);
419 if (e && !get_nat_flag(e, IS_CHECKPOINTED))
420 is_cp = false;
421 f2fs_up_read(&nm_i->nat_tree_lock);
422 return is_cp;
423 }
424
bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
426 {
427 struct f2fs_nm_info *nm_i = NM_I(sbi);
428 struct nat_entry *e;
429 bool need_update = true;
430
431 f2fs_down_read(&nm_i->nat_tree_lock);
432 e = __lookup_nat_cache(nm_i, ino, false);
433 if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
434 (get_nat_flag(e, IS_CHECKPOINTED) ||
435 get_nat_flag(e, HAS_FSYNCED_INODE)))
436 need_update = false;
437 f2fs_up_read(&nm_i->nat_tree_lock);
438 return need_update;
439 }
440
441 /* must be locked by nat_tree_lock */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
443 struct f2fs_nat_entry *ne)
444 {
445 struct f2fs_nm_info *nm_i = NM_I(sbi);
446 struct nat_entry *new, *e;
447
448 /* Let's mitigate lock contention of nat_tree_lock during checkpoint */
449 if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
450 return;
451
452 new = __alloc_nat_entry(sbi, nid, false);
453 if (!new)
454 return;
455
456 f2fs_down_write(&nm_i->nat_tree_lock);
457 e = __lookup_nat_cache(nm_i, nid, false);
458 if (!e)
459 e = __init_nat_entry(nm_i, new, ne, false, false);
460 else
461 f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
462 nat_get_blkaddr(e) !=
463 le32_to_cpu(ne->block_addr) ||
464 nat_get_version(e) != ne->version);
465 f2fs_up_write(&nm_i->nat_tree_lock);
466 if (e != new)
467 __free_nat_entry(new);
468 }
469
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
471 block_t new_blkaddr, bool fsync_done)
472 {
473 struct f2fs_nm_info *nm_i = NM_I(sbi);
474 struct nat_entry *e;
475 struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);
476 bool init_dirty = false;
477
478 f2fs_down_write(&nm_i->nat_tree_lock);
479 e = __lookup_nat_cache(nm_i, ni->nid, true);
480 if (!e) {
481 init_dirty = true;
482 e = __init_nat_entry(nm_i, new, NULL, true, true);
483 copy_node_info(&e->ni, ni);
484 f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
485 } else if (new_blkaddr == NEW_ADDR) {
/*
 * When a nid is reallocated, the previous nat entry may still
 * remain in the nat cache, so reinitialize it with the new
 * information.
 */
491 copy_node_info(&e->ni, ni);
492 f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
493 }
494 /* let's free early to reduce memory consumption */
495 if (e != new)
496 __free_nat_entry(new);
497
498 /* sanity check */
499 f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
500 f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
501 new_blkaddr == NULL_ADDR);
502 f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
503 new_blkaddr == NEW_ADDR);
504 f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
505 new_blkaddr == NEW_ADDR);
506
507 /* increment version no as node is removed */
508 if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
509 unsigned char version = nat_get_version(e);
510
511 nat_set_version(e, inc_node_version(version));
512 }
513
514 /* change address */
515 nat_set_blkaddr(e, new_blkaddr);
516 if (!__is_valid_data_blkaddr(new_blkaddr))
517 set_nat_flag(e, IS_CHECKPOINTED, false);
518 __set_nat_cache_dirty(nm_i, e, init_dirty);
519
520 /* update fsync_mark if its inode nat entry is still alive */
521 if (ni->nid != ni->ino)
522 e = __lookup_nat_cache(nm_i, ni->ino, false);
523 if (e) {
524 if (fsync_done && ni->nid == ni->ino)
525 set_nat_flag(e, HAS_FSYNCED_INODE, true);
526 set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
527 }
528 f2fs_up_write(&nm_i->nat_tree_lock);
529 }
530
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
532 {
533 struct f2fs_nm_info *nm_i = NM_I(sbi);
534 int nr = nr_shrink;
535
536 if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
537 return 0;
538
539 spin_lock(&nm_i->nat_list_lock);
540 while (nr_shrink) {
541 struct nat_entry *ne;
542
543 if (list_empty(&nm_i->nat_entries))
544 break;
545
546 ne = list_first_entry(&nm_i->nat_entries,
547 struct nat_entry, list);
548 list_del(&ne->list);
549 spin_unlock(&nm_i->nat_list_lock);
550
551 __del_from_nat_cache(nm_i, ne);
552 nr_shrink--;
553
554 spin_lock(&nm_i->nat_list_lock);
555 }
556 spin_unlock(&nm_i->nat_list_lock);
557
558 f2fs_up_write(&nm_i->nat_tree_lock);
559 return nr - nr_shrink;
560 }
561
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
563 struct node_info *ni, bool checkpoint_context)
564 {
565 struct f2fs_nm_info *nm_i = NM_I(sbi);
566 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
567 struct f2fs_journal *journal = curseg->journal;
568 nid_t start_nid = START_NID(nid);
569 struct f2fs_nat_block *nat_blk;
570 struct folio *folio = NULL;
571 struct f2fs_nat_entry ne;
572 struct nat_entry *e;
573 pgoff_t index;
574 int i;
575 bool need_cache = true;
576
577 ni->flag = 0;
578 ni->nid = nid;
579 retry:
580 /* Check nat cache */
581 f2fs_down_read(&nm_i->nat_tree_lock);
582 e = __lookup_nat_cache(nm_i, nid, false);
583 if (e) {
584 ni->ino = nat_get_ino(e);
585 ni->blk_addr = nat_get_blkaddr(e);
586 ni->version = nat_get_version(e);
587 f2fs_up_read(&nm_i->nat_tree_lock);
588 if (IS_ENABLED(CONFIG_F2FS_CHECK_FS)) {
589 need_cache = false;
590 goto sanity_check;
591 }
592 return 0;
593 }
594
/*
 * Check the current segment summary by trying to grab journal_rwsem first.
 * This rwsem is on the checkpoint's critical path, which also requires the
 * nat_tree_lock taken above. Therefore, if we fail to grab it here, retry
 * instead of stalling the checkpoint.
 */
601 if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
602 down_read(&curseg->journal_rwsem);
603 } else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
604 !down_read_trylock(&curseg->journal_rwsem)) {
605 f2fs_up_read(&nm_i->nat_tree_lock);
606 goto retry;
607 }
608
609 i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
610 if (i >= 0) {
611 ne = nat_in_journal(journal, i);
612 node_info_from_raw_nat(ni, &ne);
613 }
614 up_read(&curseg->journal_rwsem);
615 if (i >= 0) {
616 f2fs_up_read(&nm_i->nat_tree_lock);
617 goto sanity_check;
618 }
619
620 /* Fill node_info from nat page */
621 index = current_nat_addr(sbi, nid);
622 f2fs_up_read(&nm_i->nat_tree_lock);
623
624 folio = f2fs_get_meta_folio(sbi, index);
625 if (IS_ERR(folio))
626 return PTR_ERR(folio);
627
628 nat_blk = folio_address(folio);
629 ne = nat_blk->entries[nid - start_nid];
630 node_info_from_raw_nat(ni, &ne);
631 f2fs_folio_put(folio, true);
632 sanity_check:
633 if (__is_valid_data_blkaddr(ni->blk_addr) &&
634 !f2fs_is_valid_blkaddr(sbi, ni->blk_addr,
635 DATA_GENERIC_ENHANCE)) {
636 set_sbi_flag(sbi, SBI_NEED_FSCK);
637 f2fs_err_ratelimited(sbi,
638 "f2fs_get_node_info of %pS: inconsistent nat entry, "
639 "ino:%u, nid:%u, blkaddr:%u, ver:%u, flag:%u",
640 __builtin_return_address(0),
641 ni->ino, ni->nid, ni->blk_addr, ni->version, ni->flag);
642 f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
643 return -EFSCORRUPTED;
644 }
645
646 /* cache nat entry */
647 if (need_cache)
648 cache_nat_entry(sbi, nid, &ne);
649 return 0;
650 }
651
652 /*
653 * readahead MAX_RA_NODE number of node pages.
654 */
static void f2fs_ra_node_pages(struct folio *parent, int start, int n)
656 {
657 struct f2fs_sb_info *sbi = F2FS_F_SB(parent);
658 struct blk_plug plug;
659 int i, end;
660 nid_t nid;
661
662 blk_start_plug(&plug);
663
664 /* Then, try readahead for siblings of the desired node */
665 end = start + n;
666 end = min(end, (int)NIDS_PER_BLOCK);
667 for (i = start; i < end; i++) {
668 nid = get_nid(parent, i, false);
669 f2fs_ra_node_page(sbi, nid);
670 }
671
672 blk_finish_plug(&plug);
673 }
674
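/*
 * After a lookup stopped at dn->cur_level (e.g. on a hole), return the
 * file offset right after the region covered by the missing node block,
 * so the caller can skip the whole hole in one step.
 */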
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
676 {
677 const long direct_index = ADDRS_PER_INODE(dn->inode);
678 const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
679 const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
680 unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
681 int cur_level = dn->cur_level;
682 int max_level = dn->max_level;
683 pgoff_t base = 0;
684
685 if (!dn->max_level)
686 return pgofs + 1;
687
688 while (max_level-- > cur_level)
689 skipped_unit *= NIDS_PER_BLOCK;
690
691 switch (dn->max_level) {
692 case 3:
693 base += 2 * indirect_blks;
694 fallthrough;
695 case 2:
696 base += 2 * direct_blks;
697 fallthrough;
698 case 1:
699 base += direct_index;
700 break;
701 default:
702 f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
703 }
704
705 return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
706 }
707
708 /*
709 * The maximum depth is four.
710 * Offset[0] will have raw inode offset.
711 */
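/*
 * Example, following the checks below: for block == direct_index + 5
 * (with 5 < direct_blks), the path is one level deep through the first
 * direct node: offset[0] = NODE_DIR1_BLOCK, noffset[1] = 1, offset[1] = 5.
 */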
static int get_node_path(struct inode *inode, long block,
713 int offset[4], unsigned int noffset[4])
714 {
715 const long direct_index = ADDRS_PER_INODE(inode);
716 const long direct_blks = ADDRS_PER_BLOCK(inode);
717 const long dptrs_per_blk = NIDS_PER_BLOCK;
718 const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
719 const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
720 int n = 0;
721 int level = 0;
722
723 noffset[0] = 0;
724
725 if (block < direct_index) {
726 offset[n] = block;
727 goto got;
728 }
729 block -= direct_index;
730 if (block < direct_blks) {
731 offset[n++] = NODE_DIR1_BLOCK;
732 noffset[n] = 1;
733 offset[n] = block;
734 level = 1;
735 goto got;
736 }
737 block -= direct_blks;
738 if (block < direct_blks) {
739 offset[n++] = NODE_DIR2_BLOCK;
740 noffset[n] = 2;
741 offset[n] = block;
742 level = 1;
743 goto got;
744 }
745 block -= direct_blks;
746 if (block < indirect_blks) {
747 offset[n++] = NODE_IND1_BLOCK;
748 noffset[n] = 3;
749 offset[n++] = block / direct_blks;
750 noffset[n] = 4 + offset[n - 1];
751 offset[n] = block % direct_blks;
752 level = 2;
753 goto got;
754 }
755 block -= indirect_blks;
756 if (block < indirect_blks) {
757 offset[n++] = NODE_IND2_BLOCK;
758 noffset[n] = 4 + dptrs_per_blk;
759 offset[n++] = block / direct_blks;
760 noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
761 offset[n] = block % direct_blks;
762 level = 2;
763 goto got;
764 }
765 block -= indirect_blks;
766 if (block < dindirect_blks) {
767 offset[n++] = NODE_DIND_BLOCK;
768 noffset[n] = 5 + (dptrs_per_blk * 2);
769 offset[n++] = block / indirect_blks;
770 noffset[n] = 6 + (dptrs_per_blk * 2) +
771 offset[n - 1] * (dptrs_per_blk + 1);
772 offset[n++] = (block / direct_blks) % dptrs_per_blk;
773 noffset[n] = 7 + (dptrs_per_blk * 2) +
774 offset[n - 2] * (dptrs_per_blk + 1) +
775 offset[n - 1];
776 offset[n] = block % direct_blks;
777 level = 3;
778 goto got;
779 } else {
780 return -E2BIG;
781 }
782 got:
783 return level;
784 }
785
786 static struct folio *f2fs_get_node_folio_ra(struct folio *parent, int start);
787
788 /*
789 * Caller should call f2fs_put_dnode(dn).
790 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
791 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
792 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
794 {
795 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
796 struct folio *nfolio[4];
797 struct folio *parent = NULL;
798 int offset[4];
799 unsigned int noffset[4];
800 nid_t nids[4];
801 int level, i = 0;
802 int err = 0;
803
804 level = get_node_path(dn->inode, index, offset, noffset);
805 if (level < 0)
806 return level;
807
808 nids[0] = dn->inode->i_ino;
809
810 if (!dn->inode_folio) {
811 nfolio[0] = f2fs_get_inode_folio(sbi, nids[0]);
812 if (IS_ERR(nfolio[0]))
813 return PTR_ERR(nfolio[0]);
814 } else {
815 nfolio[0] = dn->inode_folio;
816 }
817
818 /* if inline_data is set, should not report any block indices */
819 if (f2fs_has_inline_data(dn->inode) && index) {
820 err = -ENOENT;
821 f2fs_folio_put(nfolio[0], true);
822 goto release_out;
823 }
824
825 parent = nfolio[0];
826 if (level != 0)
827 nids[1] = get_nid(parent, offset[0], true);
828 dn->inode_folio = nfolio[0];
829 dn->inode_folio_locked = true;
830
831 /* get indirect or direct nodes */
832 for (i = 1; i <= level; i++) {
833 bool done = false;
834
835 if (nids[i] && nids[i] == dn->inode->i_ino) {
836 err = -EFSCORRUPTED;
837 f2fs_err_ratelimited(sbi,
838 "inode mapping table is corrupted, run fsck to fix it, "
839 "ino:%lu, nid:%u, level:%d, offset:%d",
840 dn->inode->i_ino, nids[i], level, offset[level]);
841 set_sbi_flag(sbi, SBI_NEED_FSCK);
842 goto release_pages;
843 }
844
845 if (!nids[i] && mode == ALLOC_NODE) {
846 /* alloc new node */
847 if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
848 err = -ENOSPC;
849 goto release_pages;
850 }
851
852 dn->nid = nids[i];
853 nfolio[i] = f2fs_new_node_folio(dn, noffset[i]);
854 if (IS_ERR(nfolio[i])) {
855 f2fs_alloc_nid_failed(sbi, nids[i]);
856 err = PTR_ERR(nfolio[i]);
857 goto release_pages;
858 }
859
860 set_nid(parent, offset[i - 1], nids[i], i == 1);
861 f2fs_alloc_nid_done(sbi, nids[i]);
862 done = true;
863 } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
864 nfolio[i] = f2fs_get_node_folio_ra(parent, offset[i - 1]);
865 if (IS_ERR(nfolio[i])) {
866 err = PTR_ERR(nfolio[i]);
867 goto release_pages;
868 }
869 done = true;
870 }
871 if (i == 1) {
872 dn->inode_folio_locked = false;
873 folio_unlock(parent);
874 } else {
875 f2fs_folio_put(parent, true);
876 }
877
878 if (!done) {
879 nfolio[i] = f2fs_get_node_folio(sbi, nids[i],
880 NODE_TYPE_NON_INODE);
881 if (IS_ERR(nfolio[i])) {
882 err = PTR_ERR(nfolio[i]);
883 f2fs_folio_put(nfolio[0], false);
884 goto release_out;
885 }
886 }
887 if (i < level) {
888 parent = nfolio[i];
889 nids[i + 1] = get_nid(parent, offset[i], false);
890 }
891 }
892 dn->nid = nids[level];
893 dn->ofs_in_node = offset[level];
894 dn->node_folio = nfolio[level];
895 dn->data_blkaddr = f2fs_data_blkaddr(dn);
896
897 if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
898 f2fs_sb_has_readonly(sbi)) {
899 unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
900 unsigned int ofs_in_node = dn->ofs_in_node;
901 pgoff_t fofs = index;
902 unsigned int c_len;
903 block_t blkaddr;
904
905 /* should align fofs and ofs_in_node to cluster_size */
906 if (fofs % cluster_size) {
907 fofs = round_down(fofs, cluster_size);
908 ofs_in_node = round_down(ofs_in_node, cluster_size);
909 }
910
911 c_len = f2fs_cluster_blocks_are_contiguous(dn, ofs_in_node);
912 if (!c_len)
913 goto out;
914
915 blkaddr = data_blkaddr(dn->inode, dn->node_folio, ofs_in_node);
916 if (blkaddr == COMPRESS_ADDR)
917 blkaddr = data_blkaddr(dn->inode, dn->node_folio,
918 ofs_in_node + 1);
919
920 f2fs_update_read_extent_tree_range_compressed(dn->inode,
921 fofs, blkaddr, cluster_size, c_len);
922 }
923 out:
924 return 0;
925
926 release_pages:
927 f2fs_folio_put(parent, true);
928 if (i > 1)
929 f2fs_folio_put(nfolio[0], false);
930 release_out:
931 dn->inode_folio = NULL;
932 dn->node_folio = NULL;
933 if (err == -ENOENT) {
934 dn->cur_level = i;
935 dn->max_level = level;
936 dn->ofs_in_node = offset[level];
937 }
938 return err;
939 }
940
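/*
 * Release the block backing dn->node_folio: invalidate the block address,
 * drop the valid node count, clear the NAT mapping and drop the cached
 * folio. On failure the caller still owns dn->node_folio.
 */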
static int truncate_node(struct dnode_of_data *dn)
942 {
943 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
944 struct node_info ni;
945 int err;
946 pgoff_t index;
947
948 err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
949 if (err)
950 return err;
951
952 if (ni.blk_addr != NEW_ADDR &&
953 !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC_ENHANCE)) {
954 f2fs_err_ratelimited(sbi,
955 "nat entry is corrupted, run fsck to fix it, ino:%u, "
956 "nid:%u, blkaddr:%u", ni.ino, ni.nid, ni.blk_addr);
957 set_sbi_flag(sbi, SBI_NEED_FSCK);
958 f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
959 return -EFSCORRUPTED;
960 }
961
962 /* Deallocate node address */
963 f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
964 dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
965 set_node_addr(sbi, &ni, NULL_ADDR, false);
966
967 if (dn->nid == dn->inode->i_ino) {
968 f2fs_remove_orphan_inode(sbi, dn->nid);
969 dec_valid_inode_count(sbi);
970 f2fs_inode_synced(dn->inode);
971 }
972
973 clear_node_folio_dirty(dn->node_folio);
974 set_sbi_flag(sbi, SBI_IS_DIRTY);
975
976 index = dn->node_folio->index;
977 f2fs_folio_put(dn->node_folio, true);
978
979 invalidate_mapping_pages(NODE_MAPPING(sbi),
980 index, index);
981
982 dn->node_folio = NULL;
983 trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
984
985 return 0;
986 }
987
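/*
 * Truncate one direct node and all data blocks it addresses.
 * Returns 1 when the node has been freed (or was already absent),
 * or a negative errno on failure.
 */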
static int truncate_dnode(struct dnode_of_data *dn)
989 {
990 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
991 struct folio *folio;
992 int err;
993
994 if (dn->nid == 0)
995 return 1;
996
997 /* get direct node */
998 folio = f2fs_get_node_folio(sbi, dn->nid, NODE_TYPE_NON_INODE);
999 if (PTR_ERR(folio) == -ENOENT)
1000 return 1;
1001 else if (IS_ERR(folio))
1002 return PTR_ERR(folio);
1003
1004 if (IS_INODE(folio) || ino_of_node(folio) != dn->inode->i_ino) {
1005 f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
1006 dn->inode->i_ino, dn->nid, ino_of_node(folio));
1007 set_sbi_flag(sbi, SBI_NEED_FSCK);
1008 f2fs_handle_error(sbi, ERROR_INVALID_NODE_REFERENCE);
1009 f2fs_folio_put(folio, true);
1010 return -EFSCORRUPTED;
1011 }
1012
1013 /* Make dnode_of_data for parameter */
1014 dn->node_folio = folio;
1015 dn->ofs_in_node = 0;
1016 f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
1017 err = truncate_node(dn);
1018 if (err) {
1019 f2fs_folio_put(folio, true);
1020 return err;
1021 }
1022
1023 return 1;
1024 }
1025
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
1027 int ofs, int depth)
1028 {
1029 struct dnode_of_data rdn = *dn;
1030 struct folio *folio;
1031 struct f2fs_node *rn;
1032 nid_t child_nid;
1033 unsigned int child_nofs;
1034 int freed = 0;
1035 int i, ret;
1036
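/*
 * A zero nid means this sub-tree is already absent; report it as fully
 * freed (the node itself plus NIDS_PER_BLOCK children) so the caller can
 * still advance its node offset.
 */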
1037 if (dn->nid == 0)
1038 return NIDS_PER_BLOCK + 1;
1039
1040 trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
1041
1042 folio = f2fs_get_node_folio(F2FS_I_SB(dn->inode), dn->nid,
1043 NODE_TYPE_NON_INODE);
1044 if (IS_ERR(folio)) {
1045 trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(folio));
1046 return PTR_ERR(folio);
1047 }
1048
1049 f2fs_ra_node_pages(folio, ofs, NIDS_PER_BLOCK);
1050
1051 rn = F2FS_NODE(folio);
1052 if (depth < 3) {
1053 for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
1054 child_nid = le32_to_cpu(rn->in.nid[i]);
1055 if (child_nid == 0)
1056 continue;
1057 rdn.nid = child_nid;
1058 ret = truncate_dnode(&rdn);
1059 if (ret < 0)
1060 goto out_err;
1061 if (set_nid(folio, i, 0, false))
1062 dn->node_changed = true;
1063 }
1064 } else {
1065 child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
1066 for (i = ofs; i < NIDS_PER_BLOCK; i++) {
1067 child_nid = le32_to_cpu(rn->in.nid[i]);
1068 if (child_nid == 0) {
1069 child_nofs += NIDS_PER_BLOCK + 1;
1070 continue;
1071 }
1072 rdn.nid = child_nid;
1073 ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
1074 if (ret == (NIDS_PER_BLOCK + 1)) {
1075 if (set_nid(folio, i, 0, false))
1076 dn->node_changed = true;
1077 child_nofs += ret;
1078 } else if (ret < 0 && ret != -ENOENT) {
1079 goto out_err;
1080 }
1081 }
1082 freed = child_nofs;
1083 }
1084
1085 if (!ofs) {
1086 /* remove current indirect node */
1087 dn->node_folio = folio;
1088 ret = truncate_node(dn);
1089 if (ret)
1090 goto out_err;
1091 freed++;
1092 } else {
1093 f2fs_folio_put(folio, true);
1094 }
1095 trace_f2fs_truncate_nodes_exit(dn->inode, freed);
1096 return freed;
1097
1098 out_err:
1099 f2fs_folio_put(folio, true);
1100 trace_f2fs_truncate_nodes_exit(dn->inode, ret);
1101 return ret;
1102 }
1103
static int truncate_partial_nodes(struct dnode_of_data *dn,
1105 struct f2fs_inode *ri, int *offset, int depth)
1106 {
1107 struct folio *folios[2];
1108 nid_t nid[3];
1109 nid_t child_nid;
1110 int err = 0;
1111 int i;
1112 int idx = depth - 2;
1113
1114 nid[0] = get_nid(dn->inode_folio, offset[0], true);
1115 if (!nid[0])
1116 return 0;
1117
1118 /* get indirect nodes in the path */
1119 for (i = 0; i < idx + 1; i++) {
1120 /* reference count'll be increased */
1121 folios[i] = f2fs_get_node_folio(F2FS_I_SB(dn->inode), nid[i],
1122 NODE_TYPE_NON_INODE);
1123 if (IS_ERR(folios[i])) {
1124 err = PTR_ERR(folios[i]);
1125 idx = i - 1;
1126 goto fail;
1127 }
1128 nid[i + 1] = get_nid(folios[i], offset[i + 1], false);
1129 }
1130
1131 f2fs_ra_node_pages(folios[idx], offset[idx + 1], NIDS_PER_BLOCK);
1132
1133 /* free direct nodes linked to a partial indirect node */
1134 for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
1135 child_nid = get_nid(folios[idx], i, false);
1136 if (!child_nid)
1137 continue;
1138 dn->nid = child_nid;
1139 err = truncate_dnode(dn);
1140 if (err < 0)
1141 goto fail;
1142 if (set_nid(folios[idx], i, 0, false))
1143 dn->node_changed = true;
1144 }
1145
1146 if (offset[idx + 1] == 0) {
1147 dn->node_folio = folios[idx];
1148 dn->nid = nid[idx];
1149 err = truncate_node(dn);
1150 if (err)
1151 goto fail;
1152 } else {
1153 f2fs_folio_put(folios[idx], true);
1154 }
1155 offset[idx]++;
1156 offset[idx + 1] = 0;
1157 idx--;
1158 fail:
1159 for (i = idx; i >= 0; i--)
1160 f2fs_folio_put(folios[i], true);
1161
1162 trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
1163
1164 return err;
1165 }
1166
1167 /*
1168 * All the block addresses of data and nodes should be nullified.
1169 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
1171 {
1172 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1173 int err = 0, cont = 1;
1174 int level, offset[4], noffset[4];
1175 unsigned int nofs = 0;
1176 struct f2fs_inode *ri;
1177 struct dnode_of_data dn;
1178 struct folio *folio;
1179
1180 trace_f2fs_truncate_inode_blocks_enter(inode, from);
1181
1182 level = get_node_path(inode, from, offset, noffset);
1183 if (level <= 0) {
1184 if (!level) {
1185 level = -EFSCORRUPTED;
1186 f2fs_err(sbi, "%s: inode ino=%lx has corrupted node block, from:%lu addrs:%u",
1187 __func__, inode->i_ino,
1188 from, ADDRS_PER_INODE(inode));
1189 set_sbi_flag(sbi, SBI_NEED_FSCK);
1190 }
1191 trace_f2fs_truncate_inode_blocks_exit(inode, level);
1192 return level;
1193 }
1194
1195 folio = f2fs_get_inode_folio(sbi, inode->i_ino);
1196 if (IS_ERR(folio)) {
1197 trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(folio));
1198 return PTR_ERR(folio);
1199 }
1200
1201 set_new_dnode(&dn, inode, folio, NULL, 0);
1202 folio_unlock(folio);
1203
1204 ri = F2FS_INODE(folio);
1205 switch (level) {
1206 case 0:
1207 case 1:
1208 nofs = noffset[1];
1209 break;
1210 case 2:
1211 nofs = noffset[1];
1212 if (!offset[level - 1])
1213 goto skip_partial;
1214 err = truncate_partial_nodes(&dn, ri, offset, level);
1215 if (err < 0 && err != -ENOENT)
1216 goto fail;
1217 nofs += 1 + NIDS_PER_BLOCK;
1218 break;
1219 case 3:
1220 nofs = 5 + 2 * NIDS_PER_BLOCK;
1221 if (!offset[level - 1])
1222 goto skip_partial;
1223 err = truncate_partial_nodes(&dn, ri, offset, level);
1224 if (err < 0 && err != -ENOENT)
1225 goto fail;
1226 break;
1227 default:
1228 BUG();
1229 }
1230
1231 skip_partial:
1232 while (cont) {
1233 dn.nid = get_nid(folio, offset[0], true);
1234 switch (offset[0]) {
1235 case NODE_DIR1_BLOCK:
1236 case NODE_DIR2_BLOCK:
1237 err = truncate_dnode(&dn);
1238 break;
1239
1240 case NODE_IND1_BLOCK:
1241 case NODE_IND2_BLOCK:
1242 err = truncate_nodes(&dn, nofs, offset[1], 2);
1243 break;
1244
1245 case NODE_DIND_BLOCK:
1246 err = truncate_nodes(&dn, nofs, offset[1], 3);
1247 cont = 0;
1248 break;
1249
1250 default:
1251 BUG();
1252 }
1253 if (err == -ENOENT) {
1254 set_sbi_flag(F2FS_F_SB(folio), SBI_NEED_FSCK);
1255 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
1256 f2fs_err_ratelimited(sbi,
1257 "truncate node fail, ino:%lu, nid:%u, "
1258 "offset[0]:%d, offset[1]:%d, nofs:%d",
1259 inode->i_ino, dn.nid, offset[0],
1260 offset[1], nofs);
1261 err = 0;
1262 }
1263 if (err < 0)
1264 goto fail;
1265 if (offset[1] == 0 && get_nid(folio, offset[0], true)) {
1266 folio_lock(folio);
1267 BUG_ON(!is_node_folio(folio));
1268 set_nid(folio, offset[0], 0, true);
1269 folio_unlock(folio);
1270 }
1271 offset[1] = 0;
1272 offset[0]++;
1273 nofs += err;
1274 }
1275 fail:
1276 f2fs_folio_put(folio, false);
1277 trace_f2fs_truncate_inode_blocks_exit(inode, err);
1278 return err > 0 ? 0 : err;
1279 }
1280
1281 /* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
1283 {
1284 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1285 nid_t nid = F2FS_I(inode)->i_xattr_nid;
1286 struct dnode_of_data dn;
1287 struct folio *nfolio;
1288 int err;
1289
1290 if (!nid)
1291 return 0;
1292
1293 nfolio = f2fs_get_xnode_folio(sbi, nid);
1294 if (IS_ERR(nfolio))
1295 return PTR_ERR(nfolio);
1296
1297 set_new_dnode(&dn, inode, NULL, nfolio, nid);
1298 err = truncate_node(&dn);
1299 if (err) {
1300 f2fs_folio_put(nfolio, true);
1301 return err;
1302 }
1303
1304 f2fs_i_xnid_write(inode, 0);
1305
1306 return 0;
1307 }
1308
1309 /*
1310 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
1311 * f2fs_unlock_op().
1312 */
int f2fs_remove_inode_page(struct inode *inode)
1314 {
1315 struct dnode_of_data dn;
1316 int err;
1317
1318 set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
1319 err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
1320 if (err)
1321 return err;
1322
1323 err = f2fs_truncate_xattr_node(inode);
1324 if (err) {
1325 f2fs_put_dnode(&dn);
1326 return err;
1327 }
1328
1329 /* remove potential inline_data blocks */
1330 if (!IS_DEVICE_ALIASING(inode) &&
1331 (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1332 S_ISLNK(inode->i_mode)))
1333 f2fs_truncate_data_blocks_range(&dn, 1);
1334
1335 /* 0 is possible, after f2fs_new_inode() has failed */
1336 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
1337 f2fs_put_dnode(&dn);
1338 return -EIO;
1339 }
1340
1341 if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
1342 f2fs_warn(F2FS_I_SB(inode),
1343 "f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
1344 inode->i_ino, (unsigned long long)inode->i_blocks);
1345 set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
1346 }
1347
1348 /* will put inode & node pages */
1349 err = truncate_node(&dn);
1350 if (err) {
1351 f2fs_put_dnode(&dn);
1352 return err;
1353 }
1354 return 0;
1355 }
1356
struct folio *f2fs_new_inode_folio(struct inode *inode)
1358 {
1359 struct dnode_of_data dn;
1360
1361 /* allocate inode page for new inode */
1362 set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
1363
1364 /* caller should f2fs_folio_put(folio, true); */
1365 return f2fs_new_node_folio(&dn, 0);
1366 }
1367
struct folio *f2fs_new_node_folio(struct dnode_of_data *dn, unsigned int ofs)
1369 {
1370 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1371 struct node_info new_ni;
1372 struct folio *folio;
1373 int err;
1374
1375 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1376 return ERR_PTR(-EPERM);
1377
1378 folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), dn->nid, false);
1379 if (IS_ERR(folio))
1380 return folio;
1381
1382 if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
1383 goto fail;
1384
1385 #ifdef CONFIG_F2FS_CHECK_FS
1386 err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
1387 if (err) {
1388 dec_valid_node_count(sbi, dn->inode, !ofs);
1389 goto fail;
1390 }
1391 if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
1392 err = -EFSCORRUPTED;
1393 dec_valid_node_count(sbi, dn->inode, !ofs);
1394 set_sbi_flag(sbi, SBI_NEED_FSCK);
1395 f2fs_warn_ratelimited(sbi,
1396 "f2fs_new_node_folio: inconsistent nat entry, "
1397 "ino:%u, nid:%u, blkaddr:%u, ver:%u, flag:%u",
1398 new_ni.ino, new_ni.nid, new_ni.blk_addr,
1399 new_ni.version, new_ni.flag);
1400 f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
1401 goto fail;
1402 }
1403 #endif
1404 new_ni.nid = dn->nid;
1405 new_ni.ino = dn->inode->i_ino;
1406 new_ni.blk_addr = NULL_ADDR;
1407 new_ni.flag = 0;
1408 new_ni.version = 0;
1409 set_node_addr(sbi, &new_ni, NEW_ADDR, false);
1410
1411 f2fs_folio_wait_writeback(folio, NODE, true, true);
1412 fill_node_footer(folio, dn->nid, dn->inode->i_ino, ofs, true);
1413 set_cold_node(folio, S_ISDIR(dn->inode->i_mode));
1414 if (!folio_test_uptodate(folio))
1415 folio_mark_uptodate(folio);
1416 if (folio_mark_dirty(folio))
1417 dn->node_changed = true;
1418
1419 if (f2fs_has_xattr_block(ofs))
1420 f2fs_i_xnid_write(dn->inode, dn->nid);
1421
1422 if (ofs == 0)
1423 inc_valid_inode_count(sbi);
1424 return folio;
1425 fail:
1426 clear_node_folio_dirty(folio);
1427 f2fs_folio_put(folio, true);
1428 return ERR_PTR(err);
1429 }
1430
/*
 * The caller should act on the return value as follows:
 * 0: f2fs_folio_put(folio, false)
 * LOCKED_PAGE or error: f2fs_folio_put(folio, true)
 */
static int read_node_folio(struct folio *folio, blk_opf_t op_flags)
1437 {
1438 struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
1439 struct node_info ni;
1440 struct f2fs_io_info fio = {
1441 .sbi = sbi,
1442 .type = NODE,
1443 .op = REQ_OP_READ,
1444 .op_flags = op_flags,
1445 .folio = folio,
1446 .encrypted_page = NULL,
1447 };
1448 int err;
1449
1450 if (folio_test_uptodate(folio)) {
1451 if (!f2fs_inode_chksum_verify(sbi, folio)) {
1452 folio_clear_uptodate(folio);
1453 return -EFSBADCRC;
1454 }
1455 return LOCKED_PAGE;
1456 }
1457
1458 err = f2fs_get_node_info(sbi, folio->index, &ni, false);
1459 if (err)
1460 return err;
1461
1462 /* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
1463 if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
1464 folio_clear_uptodate(folio);
1465 return -ENOENT;
1466 }
1467
1468 fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
1469
1470 err = f2fs_submit_page_bio(&fio);
1471
1472 if (!err)
1473 f2fs_update_iostat(sbi, NULL, FS_NODE_READ_IO, F2FS_BLKSIZE);
1474
1475 return err;
1476 }
1477
1478 /*
1479 * Readahead a node page
1480 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
1482 {
1483 struct folio *afolio;
1484 int err;
1485
1486 if (!nid)
1487 return;
1488 if (f2fs_check_nid_range(sbi, nid))
1489 return;
1490
1491 afolio = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
1492 if (afolio)
1493 return;
1494
1495 afolio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false);
1496 if (IS_ERR(afolio))
1497 return;
1498
1499 err = read_node_folio(afolio, REQ_RAHEAD);
1500 f2fs_folio_put(afolio, err ? true : false);
1501 }
1502
static int sanity_check_node_footer(struct f2fs_sb_info *sbi,
1504 struct folio *folio, pgoff_t nid,
1505 enum node_type ntype)
1506 {
1507 if (unlikely(nid != nid_of_node(folio)))
1508 goto out_err;
1509
1510 switch (ntype) {
1511 case NODE_TYPE_INODE:
1512 if (!IS_INODE(folio))
1513 goto out_err;
1514 break;
1515 case NODE_TYPE_XATTR:
1516 if (!f2fs_has_xattr_block(ofs_of_node(folio)))
1517 goto out_err;
1518 break;
1519 case NODE_TYPE_NON_INODE:
1520 if (IS_INODE(folio))
1521 goto out_err;
1522 break;
1523 default:
1524 break;
1525 }
1526 if (time_to_inject(sbi, FAULT_INCONSISTENT_FOOTER))
1527 goto out_err;
1528 return 0;
1529 out_err:
1530 f2fs_warn(sbi, "inconsistent node block, node_type:%d, nid:%lu, "
1531 "node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
1532 ntype, nid, nid_of_node(folio), ino_of_node(folio),
1533 ofs_of_node(folio), cpver_of_node(folio),
1534 next_blkaddr_of_node(folio));
1535 set_sbi_flag(sbi, SBI_NEED_FSCK);
1536 f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
1537 return -EFSCORRUPTED;
1538 }
1539
static struct folio *__get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid,
1541 struct folio *parent, int start, enum node_type ntype)
1542 {
1543 struct folio *folio;
1544 int err;
1545
1546 if (!nid)
1547 return ERR_PTR(-ENOENT);
1548 if (f2fs_check_nid_range(sbi, nid))
1549 return ERR_PTR(-EINVAL);
1550 repeat:
1551 folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false);
1552 if (IS_ERR(folio))
1553 return folio;
1554
1555 err = read_node_folio(folio, 0);
1556 if (err < 0)
1557 goto out_put_err;
1558 if (err == LOCKED_PAGE)
1559 goto page_hit;
1560
1561 if (parent)
1562 f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);
1563
1564 folio_lock(folio);
1565
1566 if (unlikely(!is_node_folio(folio))) {
1567 f2fs_folio_put(folio, true);
1568 goto repeat;
1569 }
1570
1571 if (unlikely(!folio_test_uptodate(folio))) {
1572 err = -EIO;
1573 goto out_put_err;
1574 }
1575
1576 if (!f2fs_inode_chksum_verify(sbi, folio)) {
1577 err = -EFSBADCRC;
1578 goto out_err;
1579 }
1580 page_hit:
1581 err = sanity_check_node_footer(sbi, folio, nid, ntype);
1582 if (!err)
1583 return folio;
1584 out_err:
1585 folio_clear_uptodate(folio);
1586 out_put_err:
1587 /* ENOENT comes from read_node_folio which is not an error. */
1588 if (err != -ENOENT)
1589 f2fs_handle_page_eio(sbi, folio, NODE);
1590 f2fs_folio_put(folio, true);
1591 return ERR_PTR(err);
1592 }
1593
struct folio *f2fs_get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid,
1595 enum node_type node_type)
1596 {
1597 return __get_node_folio(sbi, nid, NULL, 0, node_type);
1598 }
1599
struct folio *f2fs_get_inode_folio(struct f2fs_sb_info *sbi, pgoff_t ino)
1601 {
1602 return __get_node_folio(sbi, ino, NULL, 0, NODE_TYPE_INODE);
1603 }
1604
struct folio *f2fs_get_xnode_folio(struct f2fs_sb_info *sbi, pgoff_t xnid)
1606 {
1607 return __get_node_folio(sbi, xnid, NULL, 0, NODE_TYPE_XATTR);
1608 }
1609
static struct folio *f2fs_get_node_folio_ra(struct folio *parent, int start)
1611 {
1612 struct f2fs_sb_info *sbi = F2FS_F_SB(parent);
1613 nid_t nid = get_nid(parent, start, false);
1614
1615 return __get_node_folio(sbi, nid, parent, start, NODE_TYPE_REGULAR);
1616 }
1617
static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
1619 {
1620 struct inode *inode;
1621 struct folio *folio;
1622 int ret;
1623
1624 /* should flush inline_data before evict_inode */
1625 inode = ilookup(sbi->sb, ino);
1626 if (!inode)
1627 return;
1628
1629 folio = f2fs_filemap_get_folio(inode->i_mapping, 0,
1630 FGP_LOCK|FGP_NOWAIT, 0);
1631 if (IS_ERR(folio))
1632 goto iput_out;
1633
1634 if (!folio_test_uptodate(folio))
1635 goto folio_out;
1636
1637 if (!folio_test_dirty(folio))
1638 goto folio_out;
1639
1640 if (!folio_clear_dirty_for_io(folio))
1641 goto folio_out;
1642
1643 ret = f2fs_write_inline_data(inode, folio);
1644 inode_dec_dirty_pages(inode);
1645 f2fs_remove_dirty_inode(inode);
1646 if (ret)
1647 folio_mark_dirty(folio);
1648 folio_out:
1649 f2fs_folio_put(folio, true);
1650 iput_out:
1651 iput(inode);
1652 }
1653
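/*
 * Scan the dirty node folios of @ino and return the last dirty dnode
 * found, with an extra reference held; f2fs_fsync_node_pages() uses it
 * to decide where to place the fsync mark.
 */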
static struct folio *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
1655 {
1656 pgoff_t index;
1657 struct folio_batch fbatch;
1658 struct folio *last_folio = NULL;
1659 int nr_folios;
1660
1661 folio_batch_init(&fbatch);
1662 index = 0;
1663
1664 while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
1665 (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
1666 &fbatch))) {
1667 int i;
1668
1669 for (i = 0; i < nr_folios; i++) {
1670 struct folio *folio = fbatch.folios[i];
1671
1672 if (unlikely(f2fs_cp_error(sbi))) {
1673 f2fs_folio_put(last_folio, false);
1674 folio_batch_release(&fbatch);
1675 return ERR_PTR(-EIO);
1676 }
1677
1678 if (!IS_DNODE(folio) || !is_cold_node(folio))
1679 continue;
1680 if (ino_of_node(folio) != ino)
1681 continue;
1682
1683 folio_lock(folio);
1684
1685 if (unlikely(!is_node_folio(folio))) {
1686 continue_unlock:
1687 folio_unlock(folio);
1688 continue;
1689 }
1690 if (ino_of_node(folio) != ino)
1691 goto continue_unlock;
1692
1693 if (!folio_test_dirty(folio)) {
1694 /* someone wrote it for us */
1695 goto continue_unlock;
1696 }
1697
1698 if (last_folio)
1699 f2fs_folio_put(last_folio, false);
1700
1701 folio_get(folio);
1702 last_folio = folio;
1703 folio_unlock(folio);
1704 }
1705 folio_batch_release(&fbatch);
1706 cond_resched();
1707 }
1708 return last_folio;
1709 }
1710
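/*
 * Write back one dirty node folio. Returns true when the folio has been
 * handled (submitted for IO or dropped), false when it was redirtied;
 * the folio is unlocked in all cases.
 */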
static bool __write_node_folio(struct folio *folio, bool atomic, bool *submitted,
1712 struct writeback_control *wbc, bool do_balance,
1713 enum iostat_type io_type, unsigned int *seq_id)
1714 {
1715 struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
1716 nid_t nid;
1717 struct node_info ni;
1718 struct f2fs_io_info fio = {
1719 .sbi = sbi,
1720 .ino = ino_of_node(folio),
1721 .type = NODE,
1722 .op = REQ_OP_WRITE,
1723 .op_flags = wbc_to_write_flags(wbc),
1724 .folio = folio,
1725 .encrypted_page = NULL,
1726 .submitted = 0,
1727 .io_type = io_type,
1728 .io_wbc = wbc,
1729 };
1730 unsigned int seq;
1731
1732 trace_f2fs_writepage(folio, NODE);
1733
1734 if (unlikely(f2fs_cp_error(sbi))) {
1735 /* keep node pages in remount-ro mode */
1736 if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
1737 goto redirty_out;
1738 folio_clear_uptodate(folio);
1739 dec_page_count(sbi, F2FS_DIRTY_NODES);
1740 folio_unlock(folio);
1741 return true;
1742 }
1743
1744 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1745 goto redirty_out;
1746
1747 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
1748 wbc->sync_mode == WB_SYNC_NONE &&
1749 IS_DNODE(folio) && is_cold_node(folio))
1750 goto redirty_out;
1751
1752 /* get old block addr of this node page */
1753 nid = nid_of_node(folio);
1754 f2fs_bug_on(sbi, folio->index != nid);
1755
1756 if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
1757 goto redirty_out;
1758
1759 f2fs_down_read(&sbi->node_write);
1760
1761 /* This page is already truncated */
1762 if (unlikely(ni.blk_addr == NULL_ADDR)) {
1763 folio_clear_uptodate(folio);
1764 dec_page_count(sbi, F2FS_DIRTY_NODES);
1765 f2fs_up_read(&sbi->node_write);
1766 folio_unlock(folio);
1767 return true;
1768 }
1769
1770 if (__is_valid_data_blkaddr(ni.blk_addr) &&
1771 !f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
1772 DATA_GENERIC_ENHANCE)) {
1773 f2fs_up_read(&sbi->node_write);
1774 goto redirty_out;
1775 }
1776
1777 if (atomic && !test_opt(sbi, NOBARRIER))
1778 fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
1779
1780 /* should add to global list before clearing PAGECACHE status */
1781 if (f2fs_in_warm_node_list(sbi, folio)) {
1782 seq = f2fs_add_fsync_node_entry(sbi, folio);
1783 if (seq_id)
1784 *seq_id = seq;
1785 }
1786
1787 folio_start_writeback(folio);
1788
1789 fio.old_blkaddr = ni.blk_addr;
1790 f2fs_do_write_node_page(nid, &fio);
1791 set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(folio));
1792 dec_page_count(sbi, F2FS_DIRTY_NODES);
1793 f2fs_up_read(&sbi->node_write);
1794
1795 folio_unlock(folio);
1796
1797 if (unlikely(f2fs_cp_error(sbi))) {
1798 f2fs_submit_merged_write(sbi, NODE);
1799 submitted = NULL;
1800 }
1801 if (submitted)
1802 *submitted = fio.submitted;
1803
1804 if (do_balance)
1805 f2fs_balance_fs(sbi, false);
1806 return true;
1807
1808 redirty_out:
1809 folio_redirty_for_writepage(wbc, folio);
1810 folio_unlock(folio);
1811 return false;
1812 }
1813
int f2fs_move_node_folio(struct folio *node_folio, int gc_type)
1815 {
1816 int err = 0;
1817
1818 if (gc_type == FG_GC) {
1819 struct writeback_control wbc = {
1820 .sync_mode = WB_SYNC_ALL,
1821 .nr_to_write = 1,
1822 };
1823
1824 f2fs_folio_wait_writeback(node_folio, NODE, true, true);
1825
1826 folio_mark_dirty(node_folio);
1827
1828 if (!folio_clear_dirty_for_io(node_folio)) {
1829 err = -EAGAIN;
1830 goto out_page;
1831 }
1832
1833 if (!__write_node_folio(node_folio, false, NULL,
1834 &wbc, false, FS_GC_NODE_IO, NULL))
1835 err = -EAGAIN;
1836 goto release_page;
1837 } else {
1838 /* set page dirty and write it */
1839 if (!folio_test_writeback(node_folio))
1840 folio_mark_dirty(node_folio);
1841 }
1842 out_page:
1843 folio_unlock(node_folio);
1844 release_page:
1845 f2fs_folio_put(node_folio, false);
1846 return err;
1847 }
1848
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
1850 struct writeback_control *wbc, bool atomic,
1851 unsigned int *seq_id)
1852 {
1853 pgoff_t index;
1854 struct folio_batch fbatch;
1855 int ret = 0;
1856 struct folio *last_folio = NULL;
1857 bool marked = false;
1858 nid_t ino = inode->i_ino;
1859 int nr_folios;
1860 int nwritten = 0;
1861
1862 if (atomic) {
1863 last_folio = last_fsync_dnode(sbi, ino);
1864 if (IS_ERR_OR_NULL(last_folio))
1865 return PTR_ERR_OR_ZERO(last_folio);
1866 }
1867 retry:
1868 folio_batch_init(&fbatch);
1869 index = 0;
1870
1871 while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
1872 (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
1873 &fbatch))) {
1874 int i;
1875
1876 for (i = 0; i < nr_folios; i++) {
1877 struct folio *folio = fbatch.folios[i];
1878 bool submitted = false;
1879
1880 if (unlikely(f2fs_cp_error(sbi))) {
1881 f2fs_folio_put(last_folio, false);
1882 folio_batch_release(&fbatch);
1883 ret = -EIO;
1884 goto out;
1885 }
1886
1887 if (!IS_DNODE(folio) || !is_cold_node(folio))
1888 continue;
1889 if (ino_of_node(folio) != ino)
1890 continue;
1891
1892 folio_lock(folio);
1893
1894 if (unlikely(!is_node_folio(folio))) {
1895 continue_unlock:
1896 folio_unlock(folio);
1897 continue;
1898 }
1899 if (ino_of_node(folio) != ino)
1900 goto continue_unlock;
1901
1902 if (!folio_test_dirty(folio) && folio != last_folio) {
1903 /* someone wrote it for us */
1904 goto continue_unlock;
1905 }
1906
1907 f2fs_folio_wait_writeback(folio, NODE, true, true);
1908
1909 set_fsync_mark(folio, 0);
1910 set_dentry_mark(folio, 0);
1911
1912 if (!atomic || folio == last_folio) {
1913 set_fsync_mark(folio, 1);
1914 percpu_counter_inc(&sbi->rf_node_block_count);
1915 if (IS_INODE(folio)) {
1916 if (is_inode_flag_set(inode,
1917 FI_DIRTY_INODE))
1918 f2fs_update_inode(inode, folio);
1919 set_dentry_mark(folio,
1920 f2fs_need_dentry_mark(sbi, ino));
1921 }
1922 /* may be written by another thread */
1923 if (!folio_test_dirty(folio))
1924 folio_mark_dirty(folio);
1925 }
1926
1927 if (!folio_clear_dirty_for_io(folio))
1928 goto continue_unlock;
1929
1930 if (!__write_node_folio(folio, atomic &&
1931 folio == last_folio,
1932 &submitted, wbc, true,
1933 FS_NODE_IO, seq_id)) {
1934 f2fs_folio_put(last_folio, false);
1935 folio_batch_release(&fbatch);
1936 ret = -EIO;
1937 goto out;
1938 }
1939 if (submitted)
1940 nwritten++;
1941
1942 if (folio == last_folio) {
1943 f2fs_folio_put(folio, false);
1944 folio_batch_release(&fbatch);
1945 marked = true;
1946 goto out;
1947 }
1948 }
1949 folio_batch_release(&fbatch);
1950 cond_resched();
1951 }
1952 if (atomic && !marked) {
1953 f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
1954 ino, last_folio->index);
1955 folio_lock(last_folio);
1956 f2fs_folio_wait_writeback(last_folio, NODE, true, true);
1957 folio_mark_dirty(last_folio);
1958 folio_unlock(last_folio);
1959 goto retry;
1960 }
1961 out:
1962 if (nwritten)
1963 f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
1964 return ret;
1965 }
1966
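/*
 * Match callback for find_inode_nowait(): returns 1 (after grabbing a
 * reference with igrab()) only for the inode with the given ino that is
 * dirty and still linked on the global dirty list.
 */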
1967 static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
1968 {
1969 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1970 bool clean;
1971
1972 if (inode->i_ino != ino)
1973 return 0;
1974
1975 if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
1976 return 0;
1977
1978 spin_lock(&sbi->inode_lock[DIRTY_META]);
1979 clean = list_empty(&F2FS_I(inode)->gdirty_list);
1980 spin_unlock(&sbi->inode_lock[DIRTY_META]);
1981
1982 if (clean)
1983 return 0;
1984
1985 inode = igrab(inode);
1986 if (!inode)
1987 return 0;
1988 return 1;
1989 }
1990
1991 static bool flush_dirty_inode(struct folio *folio)
1992 {
1993 struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
1994 struct inode *inode;
1995 nid_t ino = ino_of_node(folio);
1996
1997 inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
1998 if (!inode)
1999 return false;
2000
2001 f2fs_update_inode(inode, folio);
2002 folio_unlock(folio);
2003
2004 iput(inode);
2005 return true;
2006 }
2007
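/*
 * Walk the dirty node folios and, for every inode folio that carries the
 * inline flag, clear the flag and flush the in-memory inline data of the
 * owning inode.
 */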
2008 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
2009 {
2010 pgoff_t index = 0;
2011 struct folio_batch fbatch;
2012 int nr_folios;
2013
2014 folio_batch_init(&fbatch);
2015
2016 while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
2017 (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
2018 &fbatch))) {
2019 int i;
2020
2021 for (i = 0; i < nr_folios; i++) {
2022 struct folio *folio = fbatch.folios[i];
2023
2024 if (!IS_INODE(folio))
2025 continue;
2026
2027 folio_lock(folio);
2028
2029 if (unlikely(!is_node_folio(folio)))
2030 goto unlock;
2031 if (!folio_test_dirty(folio))
2032 goto unlock;
2033
2034 /* flush inline_data, if this is an async context */
2035 if (folio_test_f2fs_inline(folio)) {
2036 folio_clear_f2fs_inline(folio);
2037 folio_unlock(folio);
2038 flush_inline_data(sbi, ino_of_node(folio));
2039 continue;
2040 }
2041 unlock:
2042 folio_unlock(folio);
2043 }
2044 folio_batch_release(&fbatch);
2045 cond_resched();
2046 }
2047 }
2048
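/*
 * Write back dirty node folios in three passes (indirect nodes, dentry
 * dnodes, file dnodes).  When @do_balance is set, inline data and dirty
 * inodes are flushed along the way.  Returns -EIO once a checkpoint error
 * has been detected.
 */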
2049 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
2050 struct writeback_control *wbc,
2051 bool do_balance, enum iostat_type io_type)
2052 {
2053 pgoff_t index;
2054 struct folio_batch fbatch;
2055 int step = 0;
2056 int nwritten = 0;
2057 int ret = 0;
2058 int nr_folios, done = 0;
2059
2060 folio_batch_init(&fbatch);
2061
2062 next_step:
2063 index = 0;
2064
2065 while (!done && (nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi),
2066 &index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
2067 &fbatch))) {
2068 int i;
2069
2070 for (i = 0; i < nr_folios; i++) {
2071 struct folio *folio = fbatch.folios[i];
2072 bool submitted = false;
2073
2074 /* give priority to WB_SYNC threads */
2075 if (atomic_read(&sbi->wb_sync_req[NODE]) &&
2076 wbc->sync_mode == WB_SYNC_NONE) {
2077 done = 1;
2078 break;
2079 }
2080
2081 /*
2082 * flushing sequence with step:
2083 * 0. indirect nodes
2084 * 1. dentry dnodes
2085 * 2. file dnodes
2086 */
2087 if (step == 0 && IS_DNODE(folio))
2088 continue;
2089 if (step == 1 && (!IS_DNODE(folio) ||
2090 is_cold_node(folio)))
2091 continue;
2092 if (step == 2 && (!IS_DNODE(folio) ||
2093 !is_cold_node(folio)))
2094 continue;
2095 lock_node:
2096 if (wbc->sync_mode == WB_SYNC_ALL)
2097 folio_lock(folio);
2098 else if (!folio_trylock(folio))
2099 continue;
2100
2101 if (unlikely(!is_node_folio(folio))) {
2102 continue_unlock:
2103 folio_unlock(folio);
2104 continue;
2105 }
2106
2107 if (!folio_test_dirty(folio)) {
2108 /* someone wrote it for us */
2109 goto continue_unlock;
2110 }
2111
2112 /* flush inline_data/inode, if this is an async context */
2113 if (!do_balance)
2114 goto write_node;
2115
2116 /* flush inline_data */
2117 if (folio_test_f2fs_inline(folio)) {
2118 folio_clear_f2fs_inline(folio);
2119 folio_unlock(folio);
2120 flush_inline_data(sbi, ino_of_node(folio));
2121 goto lock_node;
2122 }
2123
2124 /* flush dirty inode */
2125 if (IS_INODE(folio) && flush_dirty_inode(folio))
2126 goto lock_node;
2127 write_node:
2128 f2fs_folio_wait_writeback(folio, NODE, true, true);
2129
2130 if (!folio_clear_dirty_for_io(folio))
2131 goto continue_unlock;
2132
2133 set_fsync_mark(folio, 0);
2134 set_dentry_mark(folio, 0);
2135
2136 if (!__write_node_folio(folio, false, &submitted,
2137 wbc, do_balance, io_type, NULL)) {
2138 folio_batch_release(&fbatch);
2139 ret = -EIO;
2140 goto out;
2141 }
2142 if (submitted)
2143 nwritten++;
2144
2145 if (--wbc->nr_to_write == 0)
2146 break;
2147 }
2148 folio_batch_release(&fbatch);
2149 cond_resched();
2150
2151 if (wbc->nr_to_write == 0) {
2152 step = 2;
2153 break;
2154 }
2155 }
2156
2157 if (step < 2) {
2158 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2159 wbc->sync_mode == WB_SYNC_NONE && step == 1)
2160 goto out;
2161 step++;
2162 goto next_step;
2163 }
2164 out:
2165 if (nwritten)
2166 f2fs_submit_merged_write(sbi, NODE);
2167
2168 if (unlikely(f2fs_cp_error(sbi)))
2169 return -EIO;
2170 return ret;
2171 }
2172
2173 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
2174 unsigned int seq_id)
2175 {
2176 struct fsync_node_entry *fn;
2177 struct list_head *head = &sbi->fsync_node_list;
2178 unsigned long flags;
2179 unsigned int cur_seq_id = 0;
2180
2181 while (seq_id && cur_seq_id < seq_id) {
2182 struct folio *folio;
2183
2184 spin_lock_irqsave(&sbi->fsync_node_lock, flags);
2185 if (list_empty(head)) {
2186 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2187 break;
2188 }
2189 fn = list_first_entry(head, struct fsync_node_entry, list);
2190 if (fn->seq_id > seq_id) {
2191 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2192 break;
2193 }
2194 cur_seq_id = fn->seq_id;
2195 folio = fn->folio;
2196 folio_get(folio);
2197 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2198
2199 f2fs_folio_wait_writeback(folio, NODE, true, false);
2200
2201 folio_put(folio);
2202 }
2203
2204 return filemap_check_errors(NODE_MAPPING(sbi));
2205 }
2206
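/*
 * Illustrative, non-compiled sketch (guarded by "#if 0") of how the
 * @seq_id produced by f2fs_fsync_node_pages() is typically consumed.
 * The caller below is hypothetical; the real fsync path lives outside
 * this file.
 */
#if 0	/* example only -- never compiled */
static int example_fsync_nodes(struct f2fs_sb_info *sbi, struct inode *inode,
			       struct writeback_control *wbc)
{
	unsigned int seq_id = 0;
	int ret;

	/* write the dirty dnodes of @inode and record the last sequence id */
	ret = f2fs_fsync_node_pages(sbi, inode, wbc, true, &seq_id);
	if (ret)
		return ret;

	/* wait until every node folio queued up to @seq_id is on disk */
	return f2fs_wait_on_node_pages_writeback(sbi, seq_id);
}
#endif
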
2207 static int f2fs_write_node_pages(struct address_space *mapping,
2208 struct writeback_control *wbc)
2209 {
2210 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2211 struct blk_plug plug;
2212 long diff;
2213
2214 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2215 goto skip_write;
2216
2217 /* balance f2fs's metadata in the background */
2218 f2fs_balance_fs_bg(sbi, true);
2219
2220 /* collect a number of dirty node pages and write them together */
2221 if (wbc->sync_mode != WB_SYNC_ALL &&
2222 get_pages(sbi, F2FS_DIRTY_NODES) <
2223 nr_pages_to_skip(sbi, NODE))
2224 goto skip_write;
2225
2226 if (wbc->sync_mode == WB_SYNC_ALL)
2227 atomic_inc(&sbi->wb_sync_req[NODE]);
2228 else if (atomic_read(&sbi->wb_sync_req[NODE])) {
2229 /* to avoid potential deadlock */
2230 if (current->plug)
2231 blk_finish_plug(current->plug);
2232 goto skip_write;
2233 }
2234
2235 trace_f2fs_writepages(mapping->host, wbc, NODE);
2236
2237 diff = nr_pages_to_write(sbi, NODE, wbc);
2238 blk_start_plug(&plug);
2239 f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
2240 blk_finish_plug(&plug);
2241 wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
2242
2243 if (wbc->sync_mode == WB_SYNC_ALL)
2244 atomic_dec(&sbi->wb_sync_req[NODE]);
2245 return 0;
2246
2247 skip_write:
2248 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
2249 trace_f2fs_writepages(mapping->host, wbc, NODE);
2250 return 0;
2251 }
2252
2253 static bool f2fs_dirty_node_folio(struct address_space *mapping,
2254 struct folio *folio)
2255 {
2256 trace_f2fs_set_page_dirty(folio, NODE);
2257
2258 if (!folio_test_uptodate(folio))
2259 folio_mark_uptodate(folio);
2260 #ifdef CONFIG_F2FS_CHECK_FS
2261 if (IS_INODE(folio))
2262 f2fs_inode_chksum_set(F2FS_M_SB(mapping), folio);
2263 #endif
2264 if (filemap_dirty_folio(mapping, folio)) {
2265 inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
2266 folio_set_f2fs_reference(folio);
2267 return true;
2268 }
2269 return false;
2270 }
2271
2272 /*
2273 * Structure of the f2fs node operations
2274 */
2275 const struct address_space_operations f2fs_node_aops = {
2276 .writepages = f2fs_write_node_pages,
2277 .dirty_folio = f2fs_dirty_node_folio,
2278 .invalidate_folio = f2fs_invalidate_folio,
2279 .release_folio = f2fs_release_folio,
2280 .migrate_folio = filemap_migrate_folio,
2281 };
2282
2283 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
2284 nid_t n)
2285 {
2286 return radix_tree_lookup(&nm_i->free_nid_root, n);
2287 }
2288
2289 static int __insert_free_nid(struct f2fs_sb_info *sbi,
2290 struct free_nid *i)
2291 {
2292 struct f2fs_nm_info *nm_i = NM_I(sbi);
2293 int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
2294
2295 if (err)
2296 return err;
2297
2298 nm_i->nid_cnt[FREE_NID]++;
2299 list_add_tail(&i->list, &nm_i->free_nid_list);
2300 return 0;
2301 }
2302
2303 static void __remove_free_nid(struct f2fs_sb_info *sbi,
2304 struct free_nid *i, enum nid_state state)
2305 {
2306 struct f2fs_nm_info *nm_i = NM_I(sbi);
2307
2308 f2fs_bug_on(sbi, state != i->state);
2309 nm_i->nid_cnt[state]--;
2310 if (state == FREE_NID)
2311 list_del(&i->list);
2312 radix_tree_delete(&nm_i->free_nid_root, i->nid);
2313 }
2314
2315 static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
2316 enum nid_state org_state, enum nid_state dst_state)
2317 {
2318 struct f2fs_nm_info *nm_i = NM_I(sbi);
2319
2320 f2fs_bug_on(sbi, org_state != i->state);
2321 i->state = dst_state;
2322 nm_i->nid_cnt[org_state]--;
2323 nm_i->nid_cnt[dst_state]++;
2324
2325 switch (dst_state) {
2326 case PREALLOC_NID:
2327 list_del(&i->list);
2328 break;
2329 case FREE_NID:
2330 list_add_tail(&i->list, &nm_i->free_nid_list);
2331 break;
2332 default:
2333 BUG_ON(1);
2334 }
2335 }
2336
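/*
 * Maintain the per-NAT-block free nid bitmap and counter.  The bitmap is
 * only updated once the corresponding NAT block has been scanned (its bit
 * in nat_block_bitmap is set); every caller in this file holds
 * nid_list_lock around this helper.
 */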
2337 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
2338 bool set, bool build)
2339 {
2340 struct f2fs_nm_info *nm_i = NM_I(sbi);
2341 unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
2342 unsigned int nid_ofs = nid - START_NID(nid);
2343
2344 if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
2345 return;
2346
2347 if (set) {
2348 if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2349 return;
2350 __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2351 nm_i->free_nid_count[nat_ofs]++;
2352 } else {
2353 if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2354 return;
2355 __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2356 if (!build)
2357 nm_i->free_nid_count[nat_ofs]--;
2358 }
2359 }
2360
2361 /* return true if the nid is recognized as free */
2362 static bool add_free_nid(struct f2fs_sb_info *sbi,
2363 nid_t nid, bool build, bool update)
2364 {
2365 struct f2fs_nm_info *nm_i = NM_I(sbi);
2366 struct free_nid *i, *e;
2367 struct nat_entry *ne;
2368 int err;
2369 bool ret = false;
2370
2371 /* 0 nid should not be used */
2372 if (unlikely(nid == 0))
2373 return false;
2374
2375 if (unlikely(f2fs_check_nid_range(sbi, nid)))
2376 return false;
2377
2378 i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL);
2379 i->nid = nid;
2380 i->state = FREE_NID;
2381
2382 err = radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
2383 f2fs_bug_on(sbi, err);
2384
2385 err = -EINVAL;
2386
2387 spin_lock(&nm_i->nid_list_lock);
2388
2389 if (build) {
2390 /*
2391 * Thread A Thread B
2392 * - f2fs_create
2393 * - f2fs_new_inode
2394 * - f2fs_alloc_nid
2395 * - __insert_nid_to_list(PREALLOC_NID)
2396 * - f2fs_balance_fs_bg
2397 * - f2fs_build_free_nids
2398 * - __f2fs_build_free_nids
2399 * - scan_nat_page
2400 * - add_free_nid
2401 * - __lookup_nat_cache
2402 * - f2fs_add_link
2403 * - f2fs_init_inode_metadata
2404 * - f2fs_new_inode_folio
2405 * - f2fs_new_node_folio
2406 * - set_node_addr
2407 * - f2fs_alloc_nid_done
2408 * - __remove_nid_from_list(PREALLOC_NID)
2409 * - __insert_nid_to_list(FREE_NID)
2410 */
2411 ne = __lookup_nat_cache(nm_i, nid, false);
2412 if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
2413 nat_get_blkaddr(ne) != NULL_ADDR))
2414 goto err_out;
2415
2416 e = __lookup_free_nid_list(nm_i, nid);
2417 if (e) {
2418 if (e->state == FREE_NID)
2419 ret = true;
2420 goto err_out;
2421 }
2422 }
2423 ret = true;
2424 err = __insert_free_nid(sbi, i);
2425 err_out:
2426 if (update) {
2427 update_free_nid_bitmap(sbi, nid, ret, build);
2428 if (!build)
2429 nm_i->available_nids++;
2430 }
2431 spin_unlock(&nm_i->nid_list_lock);
2432 radix_tree_preload_end();
2433
2434 if (err)
2435 kmem_cache_free(free_nid_slab, i);
2436 return ret;
2437 }
2438
2439 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
2440 {
2441 struct f2fs_nm_info *nm_i = NM_I(sbi);
2442 struct free_nid *i;
2443 bool need_free = false;
2444
2445 spin_lock(&nm_i->nid_list_lock);
2446 i = __lookup_free_nid_list(nm_i, nid);
2447 if (i && i->state == FREE_NID) {
2448 __remove_free_nid(sbi, i, FREE_NID);
2449 need_free = true;
2450 }
2451 spin_unlock(&nm_i->nid_list_lock);
2452
2453 if (need_free)
2454 kmem_cache_free(free_nid_slab, i);
2455 }
2456
2457 static int scan_nat_page(struct f2fs_sb_info *sbi,
2458 struct f2fs_nat_block *nat_blk, nid_t start_nid)
2459 {
2460 struct f2fs_nm_info *nm_i = NM_I(sbi);
2461 block_t blk_addr;
2462 unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
2463 int i;
2464
2465 __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
2466
2467 i = start_nid % NAT_ENTRY_PER_BLOCK;
2468
2469 for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
2470 if (unlikely(start_nid >= nm_i->max_nid))
2471 break;
2472
2473 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
2474
2475 if (blk_addr == NEW_ADDR)
2476 return -EFSCORRUPTED;
2477
2478 if (blk_addr == NULL_ADDR) {
2479 add_free_nid(sbi, start_nid, true, true);
2480 } else {
2481 spin_lock(&NM_I(sbi)->nid_list_lock);
2482 update_free_nid_bitmap(sbi, start_nid, false, true);
2483 spin_unlock(&NM_I(sbi)->nid_list_lock);
2484 }
2485 }
2486
2487 return 0;
2488 }
2489
2490 static void scan_curseg_cache(struct f2fs_sb_info *sbi)
2491 {
2492 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2493 struct f2fs_journal *journal = curseg->journal;
2494 int i;
2495
2496 down_read(&curseg->journal_rwsem);
2497 for (i = 0; i < nats_in_cursum(journal); i++) {
2498 block_t addr;
2499 nid_t nid;
2500
2501 addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
2502 nid = le32_to_cpu(nid_in_journal(journal, i));
2503 if (addr == NULL_ADDR)
2504 add_free_nid(sbi, nid, true, false);
2505 else
2506 remove_free_nid(sbi, nid);
2507 }
2508 up_read(&curseg->journal_rwsem);
2509 }
2510
2511 static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
2512 {
2513 struct f2fs_nm_info *nm_i = NM_I(sbi);
2514 unsigned int i, idx;
2515 nid_t nid;
2516
2517 f2fs_down_read(&nm_i->nat_tree_lock);
2518
2519 for (i = 0; i < nm_i->nat_blocks; i++) {
2520 if (!test_bit_le(i, nm_i->nat_block_bitmap))
2521 continue;
2522 if (!nm_i->free_nid_count[i])
2523 continue;
2524 for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
2525 idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
2526 NAT_ENTRY_PER_BLOCK, idx);
2527 if (idx >= NAT_ENTRY_PER_BLOCK)
2528 break;
2529
2530 nid = i * NAT_ENTRY_PER_BLOCK + idx;
2531 add_free_nid(sbi, nid, true, false);
2532
2533 if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
2534 goto out;
2535 }
2536 }
2537 out:
2538 scan_curseg_cache(sbi);
2539
2540 f2fs_up_read(&nm_i->nat_tree_lock);
2541 }
2542
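/*
 * Refill the free nid list.  Unless enough FREE_NID entries are already
 * cached, scan up to FREE_NID_PAGES NAT blocks starting at next_scan_nid
 * (and, except at mount time, the free nid bitmap) plus the NAT journal
 * of the hot data curseg, then remember in next_scan_nid where to resume.
 */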
2543 static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
2544 bool sync, bool mount)
2545 {
2546 struct f2fs_nm_info *nm_i = NM_I(sbi);
2547 int i = 0, ret;
2548 nid_t nid = nm_i->next_scan_nid;
2549
2550 if (unlikely(nid >= nm_i->max_nid))
2551 nid = 0;
2552
2553 if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
2554 nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;
2555
2556 /* Enough entries */
2557 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2558 return 0;
2559
2560 if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
2561 return 0;
2562
2563 if (!mount) {
2564 /* try to find free nids in free_nid_bitmap */
2565 scan_free_nid_bits(sbi);
2566
2567 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2568 return 0;
2569 }
2570
2571 /* readahead nat pages to be scanned */
2572 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
2573 META_NAT, true);
2574
2575 f2fs_down_read(&nm_i->nat_tree_lock);
2576
2577 while (1) {
2578 if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
2579 nm_i->nat_block_bitmap)) {
2580 struct folio *folio = get_current_nat_folio(sbi, nid);
2581
2582 if (IS_ERR(folio)) {
2583 ret = PTR_ERR(folio);
2584 } else {
2585 ret = scan_nat_page(sbi, folio_address(folio),
2586 nid);
2587 f2fs_folio_put(folio, true);
2588 }
2589
2590 if (ret) {
2591 f2fs_up_read(&nm_i->nat_tree_lock);
2592
2593 if (ret == -EFSCORRUPTED) {
2594 f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
2595 set_sbi_flag(sbi, SBI_NEED_FSCK);
2596 f2fs_handle_error(sbi,
2597 ERROR_INCONSISTENT_NAT);
2598 }
2599
2600 return ret;
2601 }
2602 }
2603
2604 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
2605 if (unlikely(nid >= nm_i->max_nid))
2606 nid = 0;
2607
2608 if (++i >= FREE_NID_PAGES)
2609 break;
2610 }
2611
2612 /* go to the next free nat pages to find more free nids */
2613 nm_i->next_scan_nid = nid;
2614
2615 /* find free nids from current sum_pages */
2616 scan_curseg_cache(sbi);
2617
2618 f2fs_up_read(&nm_i->nat_tree_lock);
2619
2620 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2621 nm_i->ra_nid_pages, META_NAT, false);
2622
2623 return 0;
2624 }
2625
2626 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
2627 {
2628 int ret;
2629
2630 mutex_lock(&NM_I(sbi)->build_lock);
2631 ret = __f2fs_build_free_nids(sbi, sync, mount);
2632 mutex_unlock(&NM_I(sbi)->build_lock);
2633
2634 return ret;
2635 }
2636
2637 /*
2638 * If this function returns success, the caller can obtain a new nid
2639 * from the second parameter of this function.
2640 * The returned nid can be used as an ino as well as a nid when an inode is created.
2641 */
2642 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
2643 {
2644 struct f2fs_nm_info *nm_i = NM_I(sbi);
2645 struct free_nid *i = NULL;
2646 retry:
2647 if (time_to_inject(sbi, FAULT_ALLOC_NID))
2648 return false;
2649
2650 spin_lock(&nm_i->nid_list_lock);
2651
2652 if (unlikely(nm_i->available_nids == 0)) {
2653 spin_unlock(&nm_i->nid_list_lock);
2654 return false;
2655 }
2656
2657 /* We should not use stale free nids created by f2fs_build_free_nids */
2658 if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
2659 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
2660 i = list_first_entry(&nm_i->free_nid_list,
2661 struct free_nid, list);
2662
2663 if (unlikely(is_invalid_nid(sbi, i->nid))) {
2664 spin_unlock(&nm_i->nid_list_lock);
2665 f2fs_err(sbi, "Corrupted nid %u in free_nid_list",
2666 i->nid);
2667 f2fs_stop_checkpoint(sbi, false,
2668 STOP_CP_REASON_CORRUPTED_NID);
2669 return false;
2670 }
2671
2672 *nid = i->nid;
2673
2674 __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
2675 nm_i->available_nids--;
2676
2677 update_free_nid_bitmap(sbi, *nid, false, false);
2678
2679 spin_unlock(&nm_i->nid_list_lock);
2680 return true;
2681 }
2682 spin_unlock(&nm_i->nid_list_lock);
2683
2684 /* Let's scan nat pages and their caches to get free nids */
2685 if (!f2fs_build_free_nids(sbi, true, false))
2686 goto retry;
2687 return false;
2688 }
2689
2690 /*
2691 * f2fs_alloc_nid() should be called prior to this function.
2692 */
2693 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
2694 {
2695 struct f2fs_nm_info *nm_i = NM_I(sbi);
2696 struct free_nid *i;
2697
2698 spin_lock(&nm_i->nid_list_lock);
2699 i = __lookup_free_nid_list(nm_i, nid);
2700 f2fs_bug_on(sbi, !i);
2701 __remove_free_nid(sbi, i, PREALLOC_NID);
2702 spin_unlock(&nm_i->nid_list_lock);
2703
2704 kmem_cache_free(free_nid_slab, i);
2705 }
2706
2707 /*
2708 * f2fs_alloc_nid() should be called prior to this function.
2709 */
2710 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2711 {
2712 struct f2fs_nm_info *nm_i = NM_I(sbi);
2713 struct free_nid *i;
2714 bool need_free = false;
2715
2716 if (!nid)
2717 return;
2718
2719 spin_lock(&nm_i->nid_list_lock);
2720 i = __lookup_free_nid_list(nm_i, nid);
2721 f2fs_bug_on(sbi, !i);
2722
2723 if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
2724 __remove_free_nid(sbi, i, PREALLOC_NID);
2725 need_free = true;
2726 } else {
2727 __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
2728 }
2729
2730 nm_i->available_nids++;
2731
2732 update_free_nid_bitmap(sbi, nid, true, false);
2733
2734 spin_unlock(&nm_i->nid_list_lock);
2735
2736 if (need_free)
2737 kmem_cache_free(free_nid_slab, i);
2738 }
2739
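/*
 * Illustrative, non-compiled sketch (guarded by "#if 0") of the expected
 * pairing: every successful f2fs_alloc_nid() must be completed with
 * f2fs_alloc_nid_done() or rolled back with f2fs_alloc_nid_failed().
 * The caller and its error condition below are hypothetical.
 */
#if 0	/* example only -- never compiled */
static int example_use_new_nid(struct f2fs_sb_info *sbi)
{
	nid_t nid;
	int err;

	if (!f2fs_alloc_nid(sbi, &nid))
		return -ENOSPC;	/* no free nid available right now */

	err = 0;	/* ... build the node/inode that consumes @nid ... */
	if (err) {
		/* return the preallocated nid to the free list */
		f2fs_alloc_nid_failed(sbi, nid);
		return err;
	}

	/* @nid is in use now; drop it from the preallocated list */
	f2fs_alloc_nid_done(sbi, nid);
	return 0;
}
#endif
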
2740 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
2741 {
2742 struct f2fs_nm_info *nm_i = NM_I(sbi);
2743 int nr = nr_shrink;
2744
2745 if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2746 return 0;
2747
2748 if (!mutex_trylock(&nm_i->build_lock))
2749 return 0;
2750
2751 while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
2752 struct free_nid *i, *next;
2753 unsigned int batch = SHRINK_NID_BATCH_SIZE;
2754
2755 spin_lock(&nm_i->nid_list_lock);
2756 list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2757 if (!nr_shrink || !batch ||
2758 nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2759 break;
2760 __remove_free_nid(sbi, i, FREE_NID);
2761 kmem_cache_free(free_nid_slab, i);
2762 nr_shrink--;
2763 batch--;
2764 }
2765 spin_unlock(&nm_i->nid_list_lock);
2766 }
2767
2768 mutex_unlock(&nm_i->build_lock);
2769
2770 return nr - nr_shrink;
2771 }
2772
2773 int f2fs_recover_inline_xattr(struct inode *inode, struct folio *folio)
2774 {
2775 void *src_addr, *dst_addr;
2776 size_t inline_size;
2777 struct folio *ifolio;
2778 struct f2fs_inode *ri;
2779
2780 ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino);
2781 if (IS_ERR(ifolio))
2782 return PTR_ERR(ifolio);
2783
2784 ri = F2FS_INODE(folio);
2785 if (ri->i_inline & F2FS_INLINE_XATTR) {
2786 if (!f2fs_has_inline_xattr(inode)) {
2787 set_inode_flag(inode, FI_INLINE_XATTR);
2788 stat_inc_inline_xattr(inode);
2789 }
2790 } else {
2791 if (f2fs_has_inline_xattr(inode)) {
2792 stat_dec_inline_xattr(inode);
2793 clear_inode_flag(inode, FI_INLINE_XATTR);
2794 }
2795 goto update_inode;
2796 }
2797
2798 dst_addr = inline_xattr_addr(inode, ifolio);
2799 src_addr = inline_xattr_addr(inode, folio);
2800 inline_size = inline_xattr_size(inode);
2801
2802 f2fs_folio_wait_writeback(ifolio, NODE, true, true);
2803 memcpy(dst_addr, src_addr, inline_size);
2804 update_inode:
2805 f2fs_update_inode(inode, ifolio);
2806 f2fs_folio_put(ifolio, true);
2807 return 0;
2808 }
2809
2810 int f2fs_recover_xattr_data(struct inode *inode, struct folio *folio)
2811 {
2812 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2813 nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
2814 nid_t new_xnid;
2815 struct dnode_of_data dn;
2816 struct node_info ni;
2817 struct folio *xfolio;
2818 int err;
2819
2820 if (!prev_xnid)
2821 goto recover_xnid;
2822
2823 /* 1: invalidate the previous xattr nid */
2824 err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
2825 if (err)
2826 return err;
2827
2828 f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
2829 dec_valid_node_count(sbi, inode, false);
2830 set_node_addr(sbi, &ni, NULL_ADDR, false);
2831
2832 recover_xnid:
2833 /* 2: update xattr nid in inode */
2834 if (!f2fs_alloc_nid(sbi, &new_xnid))
2835 return -ENOSPC;
2836
2837 set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
2838 xfolio = f2fs_new_node_folio(&dn, XATTR_NODE_OFFSET);
2839 if (IS_ERR(xfolio)) {
2840 f2fs_alloc_nid_failed(sbi, new_xnid);
2841 return PTR_ERR(xfolio);
2842 }
2843
2844 f2fs_alloc_nid_done(sbi, new_xnid);
2845 f2fs_update_inode_page(inode);
2846
2847 /* 3: update and set xattr node page dirty */
2848 if (folio) {
2849 memcpy(F2FS_NODE(xfolio), F2FS_NODE(folio),
2850 VALID_XATTR_BLOCK_SIZE);
2851 folio_mark_dirty(xfolio);
2852 }
2853 f2fs_folio_put(xfolio, true);
2854
2855 return 0;
2856 }
2857
2858 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct folio *folio)
2859 {
2860 struct f2fs_inode *src, *dst;
2861 nid_t ino = ino_of_node(folio);
2862 struct node_info old_ni, new_ni;
2863 struct folio *ifolio;
2864 int err;
2865
2866 err = f2fs_get_node_info(sbi, ino, &old_ni, false);
2867 if (err)
2868 return err;
2869
2870 if (unlikely(old_ni.blk_addr != NULL_ADDR))
2871 return -EINVAL;
2872 retry:
2873 ifolio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), ino, false);
2874 if (IS_ERR(ifolio)) {
2875 memalloc_retry_wait(GFP_NOFS);
2876 goto retry;
2877 }
2878
2879 /* Should not use this inode from free nid list */
2880 remove_free_nid(sbi, ino);
2881
2882 if (!folio_test_uptodate(ifolio))
2883 folio_mark_uptodate(ifolio);
2884 fill_node_footer(ifolio, ino, ino, 0, true);
2885 set_cold_node(ifolio, false);
2886
2887 src = F2FS_INODE(folio);
2888 dst = F2FS_INODE(ifolio);
2889
2890 memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
2891 dst->i_size = 0;
2892 dst->i_blocks = cpu_to_le64(1);
2893 dst->i_links = cpu_to_le32(1);
2894 dst->i_xattr_nid = 0;
2895 dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
2896 if (dst->i_inline & F2FS_EXTRA_ATTR) {
2897 dst->i_extra_isize = src->i_extra_isize;
2898
2899 if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
2900 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2901 i_inline_xattr_size))
2902 dst->i_inline_xattr_size = src->i_inline_xattr_size;
2903
2904 if (f2fs_sb_has_project_quota(sbi) &&
2905 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2906 i_projid))
2907 dst->i_projid = src->i_projid;
2908
2909 if (f2fs_sb_has_inode_crtime(sbi) &&
2910 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2911 i_crtime_nsec)) {
2912 dst->i_crtime = src->i_crtime;
2913 dst->i_crtime_nsec = src->i_crtime_nsec;
2914 }
2915 }
2916
2917 new_ni = old_ni;
2918 new_ni.ino = ino;
2919
2920 if (unlikely(inc_valid_node_count(sbi, NULL, true)))
2921 WARN_ON(1);
2922 set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2923 inc_valid_inode_count(sbi);
2924 folio_mark_dirty(ifolio);
2925 f2fs_folio_put(ifolio, true);
2926 return 0;
2927 }
2928
2929 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
2930 unsigned int segno, struct f2fs_summary_block *sum)
2931 {
2932 struct f2fs_node *rn;
2933 struct f2fs_summary *sum_entry;
2934 block_t addr;
2935 int i, idx, last_offset, nrpages;
2936
2937 /* scan the node segment */
2938 last_offset = BLKS_PER_SEG(sbi);
2939 addr = START_BLOCK(sbi, segno);
2940 sum_entry = &sum->entries[0];
2941
2942 for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2943 nrpages = bio_max_segs(last_offset - i);
2944
2945 /* readahead node pages */
2946 f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2947
2948 for (idx = addr; idx < addr + nrpages; idx++) {
2949 struct folio *folio = f2fs_get_tmp_folio(sbi, idx);
2950
2951 if (IS_ERR(folio))
2952 return PTR_ERR(folio);
2953
2954 rn = F2FS_NODE(folio);
2955 sum_entry->nid = rn->footer.nid;
2956 sum_entry->version = 0;
2957 sum_entry->ofs_in_node = 0;
2958 sum_entry++;
2959 f2fs_folio_put(folio, true);
2960 }
2961
2962 invalidate_mapping_pages(META_MAPPING(sbi), addr,
2963 addr + nrpages);
2964 }
2965 return 0;
2966 }
2967
2968 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2969 {
2970 struct f2fs_nm_info *nm_i = NM_I(sbi);
2971 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2972 struct f2fs_journal *journal = curseg->journal;
2973 int i;
2974 bool init_dirty;
2975
2976 down_write(&curseg->journal_rwsem);
2977 for (i = 0; i < nats_in_cursum(journal); i++) {
2978 struct nat_entry *ne;
2979 struct f2fs_nat_entry raw_ne;
2980 nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2981
2982 if (f2fs_check_nid_range(sbi, nid))
2983 continue;
2984
2985 init_dirty = false;
2986
2987 raw_ne = nat_in_journal(journal, i);
2988
2989 ne = __lookup_nat_cache(nm_i, nid, true);
2990 if (!ne) {
2991 init_dirty = true;
2992 ne = __alloc_nat_entry(sbi, nid, true);
2993 __init_nat_entry(nm_i, ne, &raw_ne, true, true);
2994 }
2995
2996 /*
2997 * if a free nat in the journal has not been used since the last
2998 * checkpoint, we should remove it from the available nids,
2999 * since we will add it again later.
3000 */
3001 if (!get_nat_flag(ne, IS_DIRTY) &&
3002 le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
3003 spin_lock(&nm_i->nid_list_lock);
3004 nm_i->available_nids--;
3005 spin_unlock(&nm_i->nid_list_lock);
3006 }
3007
3008 __set_nat_cache_dirty(nm_i, ne, init_dirty);
3009 }
3010 update_nats_in_cursum(journal, -i);
3011 up_write(&curseg->journal_rwsem);
3012 }
3013
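/*
 * Insert a dirty nat entry set into @head, keeping the list sorted by
 * ascending entry_cnt; sets that cannot fit in the journal (entry_cnt >=
 * @max) go straight to the tail, so the small sets are flushed to the
 * journal first.
 */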
3014 static void __adjust_nat_entry_set(struct nat_entry_set *nes,
3015 struct list_head *head, int max)
3016 {
3017 struct nat_entry_set *cur;
3018
3019 if (nes->entry_cnt >= max)
3020 goto add_out;
3021
3022 list_for_each_entry(cur, head, set_list) {
3023 if (cur->entry_cnt >= nes->entry_cnt) {
3024 list_add(&nes->set_list, cur->set_list.prev);
3025 return;
3026 }
3027 }
3028 add_out:
3029 list_add_tail(&nes->set_list, head);
3030 }
3031
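/*
 * Recompute the full/empty nat_bits for the NAT block starting at
 * @start_nid: the empty bit is set when no entry in the block has a valid
 * block address, the full bit when all of them do (entry 0 of block 0 is
 * always counted as valid).
 */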
3032 static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
3033 const struct f2fs_nat_block *nat_blk)
3034 {
3035 struct f2fs_nm_info *nm_i = NM_I(sbi);
3036 unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
3037 int valid = 0;
3038 int i = 0;
3039
3040 if (!enabled_nat_bits(sbi, NULL))
3041 return;
3042
3043 if (nat_index == 0) {
3044 valid = 1;
3045 i = 1;
3046 }
3047 for (; i < NAT_ENTRY_PER_BLOCK; i++) {
3048 if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
3049 valid++;
3050 }
3051 if (valid == 0) {
3052 __set_bit_le(nat_index, nm_i->empty_nat_bits);
3053 __clear_bit_le(nat_index, nm_i->full_nat_bits);
3054 return;
3055 }
3056
3057 __clear_bit_le(nat_index, nm_i->empty_nat_bits);
3058 if (valid == NAT_ENTRY_PER_BLOCK)
3059 __set_bit_le(nat_index, nm_i->full_nat_bits);
3060 else
3061 __clear_bit_le(nat_index, nm_i->full_nat_bits);
3062 }
3063
3064 static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
3065 struct nat_entry_set *set, struct cp_control *cpc)
3066 {
3067 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
3068 struct f2fs_journal *journal = curseg->journal;
3069 nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
3070 bool to_journal = true;
3071 struct f2fs_nat_block *nat_blk;
3072 struct nat_entry *ne, *cur;
3073 struct folio *folio = NULL;
3074
3075 /*
3076 * there are two ways to flush nat entries:
3077 * #1, flush nat entries to the journal in the current hot data summary block.
3078 * #2, flush nat entries to the nat page.
3079 */
3080 if (enabled_nat_bits(sbi, cpc) ||
3081 !__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
3082 to_journal = false;
3083
3084 if (to_journal) {
3085 down_write(&curseg->journal_rwsem);
3086 } else {
3087 folio = get_next_nat_folio(sbi, start_nid);
3088 if (IS_ERR(folio))
3089 return PTR_ERR(folio);
3090
3091 nat_blk = folio_address(folio);
3092 f2fs_bug_on(sbi, !nat_blk);
3093 }
3094
3095 /* flush dirty nats in nat entry set */
3096 list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
3097 struct f2fs_nat_entry *raw_ne;
3098 nid_t nid = nat_get_nid(ne);
3099 int offset;
3100
3101 f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
3102
3103 if (to_journal) {
3104 offset = f2fs_lookup_journal_in_cursum(journal,
3105 NAT_JOURNAL, nid, 1);
3106 f2fs_bug_on(sbi, offset < 0);
3107 raw_ne = &nat_in_journal(journal, offset);
3108 nid_in_journal(journal, offset) = cpu_to_le32(nid);
3109 } else {
3110 raw_ne = &nat_blk->entries[nid - start_nid];
3111 }
3112 raw_nat_from_node_info(raw_ne, &ne->ni);
3113 nat_reset_flag(ne);
3114 __clear_nat_cache_dirty(NM_I(sbi), set, ne);
3115 if (nat_get_blkaddr(ne) == NULL_ADDR) {
3116 add_free_nid(sbi, nid, false, true);
3117 } else {
3118 spin_lock(&NM_I(sbi)->nid_list_lock);
3119 update_free_nid_bitmap(sbi, nid, false, false);
3120 spin_unlock(&NM_I(sbi)->nid_list_lock);
3121 }
3122 }
3123
3124 if (to_journal) {
3125 up_write(&curseg->journal_rwsem);
3126 } else {
3127 __update_nat_bits(sbi, start_nid, nat_blk);
3128 f2fs_folio_put(folio, true);
3129 }
3130
3131 /* Allow dirty nats by node block allocation in write_begin */
3132 if (!set->entry_cnt) {
3133 radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
3134 kmem_cache_free(nat_entry_set_slab, set);
3135 }
3136 return 0;
3137 }
3138
3139 /*
3140 * This function is called during the checkpointing process.
3141 */
3142 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
3143 {
3144 struct f2fs_nm_info *nm_i = NM_I(sbi);
3145 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
3146 struct f2fs_journal *journal = curseg->journal;
3147 struct nat_entry_set *setvec[NAT_VEC_SIZE];
3148 struct nat_entry_set *set, *tmp;
3149 unsigned int found;
3150 nid_t set_idx = 0;
3151 LIST_HEAD(sets);
3152 int err = 0;
3153
3154 /*
3155 * during unmount, let's flush nat_bits before checking
3156 * nat_cnt[DIRTY_NAT].
3157 */
3158 if (enabled_nat_bits(sbi, cpc)) {
3159 f2fs_down_write(&nm_i->nat_tree_lock);
3160 remove_nats_in_journal(sbi);
3161 f2fs_up_write(&nm_i->nat_tree_lock);
3162 }
3163
3164 if (!nm_i->nat_cnt[DIRTY_NAT])
3165 return 0;
3166
3167 f2fs_down_write(&nm_i->nat_tree_lock);
3168
3169 /*
3170 * if there is not enough space in the journal to store the dirty nat
3171 * entries, remove all entries from the journal and merge them
3172 * into the nat entry set.
3173 */
3174 if (enabled_nat_bits(sbi, cpc) ||
3175 !__has_cursum_space(journal,
3176 nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
3177 remove_nats_in_journal(sbi);
3178
3179 while ((found = __gang_lookup_nat_set(nm_i,
3180 set_idx, NAT_VEC_SIZE, setvec))) {
3181 unsigned idx;
3182
3183 set_idx = setvec[found - 1]->set + 1;
3184 for (idx = 0; idx < found; idx++)
3185 __adjust_nat_entry_set(setvec[idx], &sets,
3186 MAX_NAT_JENTRIES(journal));
3187 }
3188
3189 /* flush dirty nats in nat entry set */
3190 list_for_each_entry_safe(set, tmp, &sets, set_list) {
3191 err = __flush_nat_entry_set(sbi, set, cpc);
3192 if (err)
3193 break;
3194 }
3195
3196 f2fs_up_write(&nm_i->nat_tree_lock);
3197 /* Allow dirty nats by node block allocation in write_begin */
3198
3199 return err;
3200 }
3201
3202 static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
3203 {
3204 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3205 struct f2fs_nm_info *nm_i = NM_I(sbi);
3206 unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
3207 unsigned int i;
3208 __u64 cp_ver = cur_cp_version(ckpt);
3209 block_t nat_bits_addr;
3210
3211 if (!enabled_nat_bits(sbi, NULL))
3212 return 0;
3213
3214 nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
3215 nm_i->nat_bits = f2fs_kvzalloc(sbi,
3216 F2FS_BLK_TO_BYTES(nm_i->nat_bits_blocks), GFP_KERNEL);
3217 if (!nm_i->nat_bits)
3218 return -ENOMEM;
3219
3220 nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) -
3221 nm_i->nat_bits_blocks;
3222 for (i = 0; i < nm_i->nat_bits_blocks; i++) {
3223 struct folio *folio;
3224
3225 folio = f2fs_get_meta_folio(sbi, nat_bits_addr++);
3226 if (IS_ERR(folio))
3227 return PTR_ERR(folio);
3228
3229 memcpy(nm_i->nat_bits + F2FS_BLK_TO_BYTES(i),
3230 folio_address(folio), F2FS_BLKSIZE);
3231 f2fs_folio_put(folio, true);
3232 }
3233
3234 cp_ver |= (cur_cp_crc(ckpt) << 32);
3235 if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
3236 disable_nat_bits(sbi, true);
3237 return 0;
3238 }
3239
3240 nm_i->full_nat_bits = nm_i->nat_bits + 8;
3241 nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
3242
3243 f2fs_notice(sbi, "Found nat_bits in checkpoint");
3244 return 0;
3245 }
3246
3247 static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
3248 {
3249 struct f2fs_nm_info *nm_i = NM_I(sbi);
3250 unsigned int i = 0;
3251 nid_t nid, last_nid;
3252
3253 if (!enabled_nat_bits(sbi, NULL))
3254 return;
3255
3256 for (i = 0; i < nm_i->nat_blocks; i++) {
3257 i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
3258 if (i >= nm_i->nat_blocks)
3259 break;
3260
3261 __set_bit_le(i, nm_i->nat_block_bitmap);
3262
3263 nid = i * NAT_ENTRY_PER_BLOCK;
3264 last_nid = nid + NAT_ENTRY_PER_BLOCK;
3265
3266 spin_lock(&NM_I(sbi)->nid_list_lock);
3267 for (; nid < last_nid; nid++)
3268 update_free_nid_bitmap(sbi, nid, true, true);
3269 spin_unlock(&NM_I(sbi)->nid_list_lock);
3270 }
3271
3272 for (i = 0; i < nm_i->nat_blocks; i++) {
3273 i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
3274 if (i >= nm_i->nat_blocks)
3275 break;
3276
3277 __set_bit_le(i, nm_i->nat_block_bitmap);
3278 }
3279 }
3280
3281 static int init_node_manager(struct f2fs_sb_info *sbi)
3282 {
3283 struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
3284 struct f2fs_nm_info *nm_i = NM_I(sbi);
3285 unsigned char *version_bitmap;
3286 unsigned int nat_segs;
3287 int err;
3288
3289 nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
3290
3291 /* segment_count_nat includes the pair segment, so divide it by 2. */
3292 nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
3293 nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
3294 nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
3295
3296 /* unused nids: 0, node, meta (and root, which is counted as a valid node) */
3297 nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
3298 F2FS_RESERVED_NODE_NUM;
3299 nm_i->nid_cnt[FREE_NID] = 0;
3300 nm_i->nid_cnt[PREALLOC_NID] = 0;
3301 nm_i->ram_thresh = DEF_RAM_THRESHOLD;
3302 nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
3303 nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
3304 nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS;
3305
3306 INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
3307 INIT_LIST_HEAD(&nm_i->free_nid_list);
3308 INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
3309 INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
3310 INIT_LIST_HEAD(&nm_i->nat_entries);
3311 spin_lock_init(&nm_i->nat_list_lock);
3312
3313 mutex_init(&nm_i->build_lock);
3314 spin_lock_init(&nm_i->nid_list_lock);
3315 init_f2fs_rwsem(&nm_i->nat_tree_lock);
3316
3317 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
3318 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
3319 version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
3320 nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
3321 GFP_KERNEL);
3322 if (!nm_i->nat_bitmap)
3323 return -ENOMEM;
3324
3325 if (!test_opt(sbi, NAT_BITS))
3326 disable_nat_bits(sbi, true);
3327
3328 err = __get_nat_bitmaps(sbi);
3329 if (err)
3330 return err;
3331
3332 #ifdef CONFIG_F2FS_CHECK_FS
3333 nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
3334 GFP_KERNEL);
3335 if (!nm_i->nat_bitmap_mir)
3336 return -ENOMEM;
3337 #endif
3338
3339 return 0;
3340 }
3341
3342 static int init_free_nid_cache(struct f2fs_sb_info *sbi)
3343 {
3344 struct f2fs_nm_info *nm_i = NM_I(sbi);
3345 int i;
3346
3347 nm_i->free_nid_bitmap =
3348 f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
3349 nm_i->nat_blocks),
3350 GFP_KERNEL);
3351 if (!nm_i->free_nid_bitmap)
3352 return -ENOMEM;
3353
3354 for (i = 0; i < nm_i->nat_blocks; i++) {
3355 nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
3356 f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
3357 if (!nm_i->free_nid_bitmap[i])
3358 return -ENOMEM;
3359 }
3360
3361 nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
3362 GFP_KERNEL);
3363 if (!nm_i->nat_block_bitmap)
3364 return -ENOMEM;
3365
3366 nm_i->free_nid_count =
3367 f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
3368 nm_i->nat_blocks),
3369 GFP_KERNEL);
3370 if (!nm_i->free_nid_count)
3371 return -ENOMEM;
3372 return 0;
3373 }
3374
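/*
 * Set up the node manager at mount time: allocate f2fs_nm_info, initialise
 * the NAT and free nid structures, seed the free nid bitmap from nat_bits
 * when it is available, and pre-build an initial batch of free nids.
 */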
3375 int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
3376 {
3377 int err;
3378
3379 sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
3380 GFP_KERNEL);
3381 if (!sbi->nm_info)
3382 return -ENOMEM;
3383
3384 err = init_node_manager(sbi);
3385 if (err)
3386 return err;
3387
3388 err = init_free_nid_cache(sbi);
3389 if (err)
3390 return err;
3391
3392 /* load free nid status from nat_bits table */
3393 load_free_nid_bitmap(sbi);
3394
3395 return f2fs_build_free_nids(sbi, true, true);
3396 }
3397
3398 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
3399 {
3400 struct f2fs_nm_info *nm_i = NM_I(sbi);
3401 struct free_nid *i, *next_i;
3402 void *vec[NAT_VEC_SIZE];
3403 struct nat_entry **natvec = (struct nat_entry **)vec;
3404 struct nat_entry_set **setvec = (struct nat_entry_set **)vec;
3405 nid_t nid = 0;
3406 unsigned int found;
3407
3408 if (!nm_i)
3409 return;
3410
3411 /* destroy free nid list */
3412 spin_lock(&nm_i->nid_list_lock);
3413 list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
3414 __remove_free_nid(sbi, i, FREE_NID);
3415 spin_unlock(&nm_i->nid_list_lock);
3416 kmem_cache_free(free_nid_slab, i);
3417 spin_lock(&nm_i->nid_list_lock);
3418 }
3419 f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
3420 f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
3421 f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
3422 spin_unlock(&nm_i->nid_list_lock);
3423
3424 /* destroy nat cache */
3425 f2fs_down_write(&nm_i->nat_tree_lock);
3426 while ((found = __gang_lookup_nat_cache(nm_i,
3427 nid, NAT_VEC_SIZE, natvec))) {
3428 unsigned idx;
3429
3430 nid = nat_get_nid(natvec[found - 1]) + 1;
3431 for (idx = 0; idx < found; idx++) {
3432 spin_lock(&nm_i->nat_list_lock);
3433 list_del(&natvec[idx]->list);
3434 spin_unlock(&nm_i->nat_list_lock);
3435
3436 __del_from_nat_cache(nm_i, natvec[idx]);
3437 }
3438 }
3439 f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);
3440
3441 /* destroy nat set cache */
3442 nid = 0;
3443 memset(vec, 0, sizeof(void *) * NAT_VEC_SIZE);
3444 while ((found = __gang_lookup_nat_set(nm_i,
3445 nid, NAT_VEC_SIZE, setvec))) {
3446 unsigned idx;
3447
3448 nid = setvec[found - 1]->set + 1;
3449 for (idx = 0; idx < found; idx++) {
3450 /* entry_cnt is not zero when a cp_error has occurred */
3451 f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
3452 radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
3453 kmem_cache_free(nat_entry_set_slab, setvec[idx]);
3454 }
3455 }
3456 f2fs_up_write(&nm_i->nat_tree_lock);
3457
3458 kvfree(nm_i->nat_block_bitmap);
3459 if (nm_i->free_nid_bitmap) {
3460 int i;
3461
3462 for (i = 0; i < nm_i->nat_blocks; i++)
3463 kvfree(nm_i->free_nid_bitmap[i]);
3464 kvfree(nm_i->free_nid_bitmap);
3465 }
3466 kvfree(nm_i->free_nid_count);
3467
3468 kfree(nm_i->nat_bitmap);
3469 kvfree(nm_i->nat_bits);
3470 #ifdef CONFIG_F2FS_CHECK_FS
3471 kfree(nm_i->nat_bitmap_mir);
3472 #endif
3473 sbi->nm_info = NULL;
3474 kfree(nm_i);
3475 }
3476
3477 int __init f2fs_create_node_manager_caches(void)
3478 {
3479 nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
3480 sizeof(struct nat_entry));
3481 if (!nat_entry_slab)
3482 goto fail;
3483
3484 free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
3485 sizeof(struct free_nid));
3486 if (!free_nid_slab)
3487 goto destroy_nat_entry;
3488
3489 nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
3490 sizeof(struct nat_entry_set));
3491 if (!nat_entry_set_slab)
3492 goto destroy_free_nid;
3493
3494 fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
3495 sizeof(struct fsync_node_entry));
3496 if (!fsync_node_entry_slab)
3497 goto destroy_nat_entry_set;
3498 return 0;
3499
3500 destroy_nat_entry_set:
3501 kmem_cache_destroy(nat_entry_set_slab);
3502 destroy_free_nid:
3503 kmem_cache_destroy(free_nid_slab);
3504 destroy_nat_entry:
3505 kmem_cache_destroy(nat_entry_slab);
3506 fail:
3507 return -ENOMEM;
3508 }
3509
3510 void f2fs_destroy_node_manager_caches(void)
3511 {
3512 kmem_cache_destroy(fsync_node_entry_slab);
3513 kmem_cache_destroy(nat_entry_set_slab);
3514 kmem_cache_destroy(free_nid_slab);
3515 kmem_cache_destroy(nat_entry_slab);
3516 }
3517