Lines Matching refs:folio
73 static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp, in __page_cache_release() argument
76 if (folio_test_lru(folio)) { in __page_cache_release()
77 folio_lruvec_relock_irqsave(folio, lruvecp, flagsp); in __page_cache_release()
78 lruvec_del_folio(*lruvecp, folio); in __page_cache_release()
79 __folio_clear_lru_flags(folio); in __page_cache_release()
87 static void page_cache_release(struct folio *folio) in page_cache_release() argument
92 __page_cache_release(folio, &lruvec, &flags); in page_cache_release()
97 void __folio_put(struct folio *folio) in __folio_put() argument
99 if (unlikely(folio_is_zone_device(folio))) { in __folio_put()
100 free_zone_device_folio(folio); in __folio_put()
104 if (folio_test_hugetlb(folio)) { in __folio_put()
105 free_huge_folio(folio); in __folio_put()
109 page_cache_release(folio); in __folio_put()
110 folio_unqueue_deferred_split(folio); in __folio_put()
111 mem_cgroup_uncharge(folio); in __folio_put()
112 free_frozen_pages(&folio->page, folio_order(folio)); in __folio_put()
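__folio_put() is the slow path taken when a folio's reference count drops to zero: device-private and hugetlb folios are diverted to their own free routines, and everything else is pulled off the LRU, uncharged from its memcg, and handed back to the page allocator. The matches in this listing appear to come from the kernel's LRU management code (mm/swap.c). Callers never invoke __folio_put() directly; they pair folio_get() with folio_put() and let the refcount machinery decide. A minimal sketch of that pairing, with a made-up helper name:

#include <linux/mm.h>

/* Hypothetical helper: take a temporary reference on a folio we were
 * handed, inspect it, then drop the reference again.  If ours was the
 * last reference, folio_put() ends up in __folio_put() above. */
static void hold_folio_briefly(struct folio *folio)
{
	folio_get(folio);		/* refcount++ */
	if (folio_test_dirty(folio))
		pr_debug("folio %p is dirty\n", folio);
	folio_put(folio);		/* refcount--, may free via __folio_put() */
}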
116 typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
118 static void lru_add(struct lruvec *lruvec, struct folio *folio) in lru_add() argument
120 int was_unevictable = folio_test_clear_unevictable(folio); in lru_add()
121 long nr_pages = folio_nr_pages(folio); in lru_add()
123 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); in lru_add()
136 if (folio_evictable(folio)) { in lru_add()
140 folio_clear_active(folio); in lru_add()
141 folio_set_unevictable(folio); in lru_add()
149 folio->mlock_count = 0; in lru_add()
154 lruvec_add_folio(lruvec, folio); in lru_add()
155 trace_mm_lru_insertion(folio); in lru_add()
165 struct folio *folio = fbatch->folios[i]; in folio_batch_move_lru() local
168 if (move_fn != lru_add && !folio_test_clear_lru(folio)) in folio_batch_move_lru()
171 folio_lruvec_relock_irqsave(folio, &lruvec, &flags); in folio_batch_move_lru()
172 move_fn(lruvec, folio); in folio_batch_move_lru()
174 folio_set_lru(folio); in folio_batch_move_lru()
183 struct folio *folio, move_fn_t move_fn, bool disable_irq) in __folio_batch_add_and_move() argument
187 folio_get(folio); in __folio_batch_add_and_move()
194 if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || in __folio_batch_add_and_move()
195 !folio_may_be_lru_cached(folio) || lru_cache_disabled()) in __folio_batch_add_and_move()
204 #define folio_batch_add_and_move(folio, op) \ argument
207 folio, \
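The per-CPU batching above is built on the generic struct folio_batch from <linux/pagevec.h>: folio_batch_add() returns how many slots remain, so a return of zero is the usual "flush now" signal that __folio_batch_add_and_move() tests, alongside folio_may_be_lru_cached() and lru_cache_disabled(). A hedged, self-contained sketch of the same add-then-flush pattern outside the LRU code:

#include <linux/pagevec.h>
#include <linux/mm.h>

/* Illustrative only: gather folio references into a batch and drop
 * them in one go whenever the batch fills up.  folio_batch_release()
 * puts every collected folio and leaves the batch reinitialised. */
static void drop_refs_in_batches(struct folio **folios, unsigned int nr)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	for (i = 0; i < nr; i++) {
		if (!folio_batch_add(&fbatch, folios[i]))
			folio_batch_release(&fbatch);	/* batch is full */
	}
	folio_batch_release(&fbatch);			/* flush the tail */
}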
213 static void lru_move_tail(struct lruvec *lruvec, struct folio *folio) in lru_move_tail() argument
215 if (folio_test_unevictable(folio)) in lru_move_tail()
218 lruvec_del_folio(lruvec, folio); in lru_move_tail()
219 folio_clear_active(folio); in lru_move_tail()
220 lruvec_add_folio_tail(lruvec, folio); in lru_move_tail()
221 __count_vm_events(PGROTATED, folio_nr_pages(folio)); in lru_move_tail()
231 void folio_rotate_reclaimable(struct folio *folio) in folio_rotate_reclaimable() argument
233 if (folio_test_locked(folio) || folio_test_dirty(folio) || in folio_rotate_reclaimable()
234 folio_test_unevictable(folio) || !folio_test_lru(folio)) in folio_rotate_reclaimable()
237 folio_batch_add_and_move(folio, lru_move_tail); in folio_rotate_reclaimable()
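folio_rotate_reclaimable() moves a folio that has just finished writeback to the tail of the inactive list so reclaim revisits it promptly; lru_move_tail() above does the actual requeue and bumps PGROTATED. The canonical caller is writeback completion. As a hedged sketch, modelled on what folio_end_writeback() does and assuming the declaration is visible (it lives in mm-internal headers):

/* If reclaim tagged the folio with PG_reclaim while the I/O was in
 * flight, rotate it to the tail now that writeback has finished. */
static void rotate_if_reclaim_tagged(struct folio *folio)
{
	if (folio_test_reclaim(folio)) {
		folio_clear_reclaim(folio);
		folio_rotate_reclaimable(folio);
	}
}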
294 void lru_note_cost_refault(struct folio *folio) in lru_note_cost_refault() argument
298 lruvec = folio_lruvec_lock_irq(folio); in lru_note_cost_refault()
299 lru_note_cost_unlock_irq(lruvec, folio_is_file_lru(folio), in lru_note_cost_refault()
300 folio_nr_pages(folio), 0); in lru_note_cost_refault()
303 static void lru_activate(struct lruvec *lruvec, struct folio *folio) in lru_activate() argument
305 long nr_pages = folio_nr_pages(folio); in lru_activate()
307 if (folio_test_active(folio) || folio_test_unevictable(folio)) in lru_activate()
311 lruvec_del_folio(lruvec, folio); in lru_activate()
312 folio_set_active(folio); in lru_activate()
313 lruvec_add_folio(lruvec, folio); in lru_activate()
314 trace_mm_lru_activate(folio); in lru_activate()
329 void folio_activate(struct folio *folio) in folio_activate() argument
331 if (folio_test_active(folio) || folio_test_unevictable(folio) || in folio_activate()
332 !folio_test_lru(folio)) in folio_activate()
335 folio_batch_add_and_move(folio, lru_activate); in folio_activate()
343 void folio_activate(struct folio *folio) in folio_activate() argument
347 if (!folio_test_clear_lru(folio)) in folio_activate()
350 lruvec = folio_lruvec_lock_irq(folio); in folio_activate()
351 lru_activate(lruvec, folio); in folio_activate()
353 folio_set_lru(folio); in folio_activate()
357 static void __lru_cache_activate_folio(struct folio *folio) in __lru_cache_activate_folio() argument
376 struct folio *batch_folio = fbatch->folios[i]; in __lru_cache_activate_folio()
378 if (batch_folio == folio) { in __lru_cache_activate_folio()
379 folio_set_active(folio); in __lru_cache_activate_folio()
389 static void lru_gen_inc_refs(struct folio *folio) in lru_gen_inc_refs() argument
391 unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f); in lru_gen_inc_refs()
393 if (folio_test_unevictable(folio)) in lru_gen_inc_refs()
397 if (!folio_test_referenced(folio)) { in lru_gen_inc_refs()
398 set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced)); in lru_gen_inc_refs()
404 if (!folio_test_workingset(folio)) in lru_gen_inc_refs()
405 folio_set_workingset(folio); in lru_gen_inc_refs()
410 } while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags)); in lru_gen_inc_refs()
413 static bool lru_gen_clear_refs(struct folio *folio) in lru_gen_clear_refs() argument
416 int gen = folio_lru_gen(folio); in lru_gen_clear_refs()
417 int type = folio_is_file_lru(folio); in lru_gen_clear_refs()
422 set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS | BIT(PG_workingset), 0); in lru_gen_clear_refs()
424 lrugen = &folio_lruvec(folio)->lrugen; in lru_gen_clear_refs()
431 static void lru_gen_inc_refs(struct folio *folio) in lru_gen_inc_refs() argument
435 static bool lru_gen_clear_refs(struct folio *folio) in lru_gen_clear_refs() argument
455 void folio_mark_accessed(struct folio *folio) in folio_mark_accessed() argument
457 if (folio_test_dropbehind(folio)) in folio_mark_accessed()
460 lru_gen_inc_refs(folio); in folio_mark_accessed()
464 if (!folio_test_referenced(folio)) { in folio_mark_accessed()
465 folio_set_referenced(folio); in folio_mark_accessed()
466 } else if (folio_test_unevictable(folio)) { in folio_mark_accessed()
472 } else if (!folio_test_active(folio)) { in folio_mark_accessed()
479 if (folio_test_lru(folio)) in folio_mark_accessed()
480 folio_activate(folio); in folio_mark_accessed()
482 __lru_cache_activate_folio(folio); in folio_mark_accessed()
483 folio_clear_referenced(folio); in folio_mark_accessed()
484 workingset_activation(folio); in folio_mark_accessed()
486 if (folio_test_idle(folio)) in folio_mark_accessed()
487 folio_clear_idle(folio); in folio_mark_accessed()
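folio_mark_accessed() is what filesystems and drivers call when they touch a folio's contents outside of the fault path; the referenced-to-active promotion above (or lru_gen_inc_refs() under MGLRU) turns repeated calls into activation. A hedged sketch of a typical caller, with the not-found check written defensively because filemap_get_folio()'s failure convention (NULL vs. ERR_PTR) has changed across kernel versions:

#include <linux/pagemap.h>
#include <linux/mm.h>

/* Illustrative only: look up @index in @mapping, record the access so
 * the LRU logic above sees it, then drop the lookup reference. */
static void touch_cached_folio(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio = filemap_get_folio(mapping, index);

	if (IS_ERR_OR_NULL(folio))	/* not in the page cache */
		return;

	folio_mark_accessed(folio);
	folio_put(folio);		/* drop filemap_get_folio()'s reference */
}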
500 void folio_add_lru(struct folio *folio) in folio_add_lru() argument
502 VM_BUG_ON_FOLIO(folio_test_active(folio) && in folio_add_lru()
503 folio_test_unevictable(folio), folio); in folio_add_lru()
504 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); in folio_add_lru()
507 if (lru_gen_enabled() && !folio_test_unevictable(folio) && in folio_add_lru()
509 folio_set_active(folio); in folio_add_lru()
511 folio_batch_add_and_move(folio, lru_add); in folio_add_lru()
523 void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma) in folio_add_lru_vma() argument
525 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); in folio_add_lru_vma()
528 mlock_new_folio(folio); in folio_add_lru_vma()
530 folio_add_lru(folio); in folio_add_lru_vma()
554 static void lru_deactivate_file(struct lruvec *lruvec, struct folio *folio) in lru_deactivate_file() argument
556 bool active = folio_test_active(folio) || lru_gen_enabled(); in lru_deactivate_file()
557 long nr_pages = folio_nr_pages(folio); in lru_deactivate_file()
559 if (folio_test_unevictable(folio)) in lru_deactivate_file()
563 if (folio_mapped(folio)) in lru_deactivate_file()
566 lruvec_del_folio(lruvec, folio); in lru_deactivate_file()
567 folio_clear_active(folio); in lru_deactivate_file()
568 folio_clear_referenced(folio); in lru_deactivate_file()
570 if (folio_test_writeback(folio) || folio_test_dirty(folio)) { in lru_deactivate_file()
577 lruvec_add_folio(lruvec, folio); in lru_deactivate_file()
578 folio_set_reclaim(folio); in lru_deactivate_file()
584 lruvec_add_folio_tail(lruvec, folio); in lru_deactivate_file()
595 static void lru_deactivate(struct lruvec *lruvec, struct folio *folio) in lru_deactivate() argument
597 long nr_pages = folio_nr_pages(folio); in lru_deactivate()
599 if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled())) in lru_deactivate()
602 lruvec_del_folio(lruvec, folio); in lru_deactivate()
603 folio_clear_active(folio); in lru_deactivate()
604 folio_clear_referenced(folio); in lru_deactivate()
605 lruvec_add_folio(lruvec, folio); in lru_deactivate()
611 static void lru_lazyfree(struct lruvec *lruvec, struct folio *folio) in lru_lazyfree() argument
613 long nr_pages = folio_nr_pages(folio); in lru_lazyfree()
615 if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) || in lru_lazyfree()
616 folio_test_swapcache(folio) || folio_test_unevictable(folio)) in lru_lazyfree()
619 lruvec_del_folio(lruvec, folio); in lru_lazyfree()
620 folio_clear_active(folio); in lru_lazyfree()
622 lru_gen_clear_refs(folio); in lru_lazyfree()
624 folio_clear_referenced(folio); in lru_lazyfree()
630 folio_clear_swapbacked(folio); in lru_lazyfree()
631 lruvec_add_folio(lruvec, folio); in lru_lazyfree()
686 void deactivate_file_folio(struct folio *folio) in deactivate_file_folio() argument
689 if (folio_test_unevictable(folio) || !folio_test_lru(folio)) in deactivate_file_folio()
692 if (lru_gen_enabled() && lru_gen_clear_refs(folio)) in deactivate_file_folio()
695 folio_batch_add_and_move(folio, lru_deactivate_file); in deactivate_file_folio()
706 void folio_deactivate(struct folio *folio) in folio_deactivate() argument
708 if (folio_test_unevictable(folio) || !folio_test_lru(folio)) in folio_deactivate()
711 if (lru_gen_enabled() ? lru_gen_clear_refs(folio) : !folio_test_active(folio)) in folio_deactivate()
714 folio_batch_add_and_move(folio, lru_deactivate); in folio_deactivate()
724 void folio_mark_lazyfree(struct folio *folio) in folio_mark_lazyfree() argument
726 if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) || in folio_mark_lazyfree()
727 !folio_test_lru(folio) || in folio_mark_lazyfree()
728 folio_test_swapcache(folio) || folio_test_unevictable(folio)) in folio_mark_lazyfree()
731 folio_batch_add_and_move(folio, lru_lazyfree); in folio_mark_lazyfree()
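folio_mark_lazyfree() and the lru_lazyfree() handler above are the LRU side of MADV_FREE: a clean, swap-backed anonymous folio gets PG_swapbacked cleared and is requeued so reclaim can discard it without swap I/O, unless it is redirtied first. The user-visible trigger is madvise() (the madvise path itself is not part of this listing); a minimal userspace illustration:

#include <sys/mman.h>
#include <string.h>

int main(void)
{
	size_t len = 1 << 20;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;

	memset(buf, 0xaa, len);	/* dirty the anonymous pages */

	/* Mark the range disposable: under pressure the kernel may free
	 * these folios instead of swapping them out. */
	if (madvise(buf, len, MADV_FREE))
		return 1;

	munmap(buf, len);
	return 0;
}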
958 struct folio *folio = folios->folios[i]; in folios_put_refs() local
961 if (is_huge_zero_folio(folio)) in folios_put_refs()
964 if (folio_is_zone_device(folio)) { in folios_put_refs()
969 if (folio_ref_sub_and_test(folio, nr_refs)) in folios_put_refs()
970 free_zone_device_folio(folio); in folios_put_refs()
974 if (!folio_ref_sub_and_test(folio, nr_refs)) in folios_put_refs()
978 if (folio_test_hugetlb(folio)) { in folios_put_refs()
983 free_huge_folio(folio); in folios_put_refs()
986 folio_unqueue_deferred_split(folio); in folios_put_refs()
987 __page_cache_release(folio, &lruvec, &flags); in folios_put_refs()
990 folios->folios[j] = folio; in folios_put_refs()
1028 struct folio *folio = page_folio(encoded_page_ptr(encoded[i])); in release_pages() local
1036 if (folio_batch_add(&fbatch, folio) > 0) in release_pages()
1080 struct folio *folio = fbatch->folios[i]; in folio_batch_remove_exceptionals() local
1081 if (!xa_is_value(folio)) in folio_batch_remove_exceptionals()
1082 fbatch->folios[j++] = folio; in folio_batch_remove_exceptionals()