Searched refs:folio_list (Results 1 – 7 of 7) sorted by relevance
/linux/mm/damon/paddr.c
   250  LIST_HEAD(folio_list);  in damon_pa_pageout()
   287  list_add(&folio->lru, &folio_list);  in damon_pa_pageout()
   293  applied = reclaim_pages(&folio_list);
   372  static unsigned int damon_pa_migrate_folio_list(struct list_head *folio_list,  in damon_pa_migrate_folio_list()
   381  while (!list_empty(folio_list)) {  in damon_pa_migrate_folio_list()
   386  folio = lru_to_folio(folio_list);  in damon_pa_migrate_folio_list()
   399  /* 'folio_list' is always empty here */  in damon_pa_migrate_folio_list()
   406  * those back on @folio_list  in damon_pa_migrate_folio_list()
   409  list_splice_init(&migrate_folios, folio_list);  in damon_pa_migrate_folio_list()
   413  list_splice(&ret_folios, folio_list);  in damon_pa_migrate_folio_list()
   363  damon_pa_migrate_folio_list(struct list_head *folio_list, struct pglist_data *pgdat, int target_nid)  in damon_pa_migrate_folio_list() (argument)
   415  damon_pa_migrate_pages(struct list_head *folio_list, int target_nid)  in damon_pa_migrate_pages() (argument)
   [all ...]
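The paddr.c hits trace DAMON's collect-then-reclaim pattern: walk a physical region, isolate each folio onto a local folio_list, then hand the whole batch to reclaim_pages() in one call (line 293). A minimal sketch of that shape, assuming a kernel-tree context; damon_get_folio() is DAMON's pfn-to-folio helper, and the DAMOS filters, young-page checks, and unevictable fallback of the real damon_pa_pageout() are omitted:

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/swap.h>

static unsigned long pa_pageout_sketch(unsigned long start, unsigned long end)
{
	LIST_HEAD(folio_list);
	unsigned long addr;

	for (addr = start; addr < end; ) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio) {
			addr += PAGE_SIZE;
			continue;
		}
		folio_clear_referenced(folio);
		/* isolate from the LRU so reclaim_pages() can take ownership */
		if (folio_isolate_lru(folio))
			list_add(&folio->lru, &folio_list);
		addr += folio_size(folio);
		folio_put(folio);
	}
	/* reclaim the whole batch in one call, as paddr.c does */
	return reclaim_pages(&folio_list);
}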
/linux/mm/hugetlb_vmemmap.h
    23  struct list_head *folio_list,
    26  void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);
    52  struct list_head *folio_list,  in hugetlb_vmemmap_restore_folios() (argument)
    55  list_splice_init(folio_list, non_hvo_folios);  in hugetlb_vmemmap_restore_folios()
    63  static inline void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)  in hugetlb_vmemmap_optimize_folios() (argument)
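The hits at lines 52 and 63 are the stubs used when HVO (HugeTLB Vmemmap Optimization) is compiled out: nothing was ever optimized, so the restore stub simply splices every folio onto the caller's non-HVO list, and the optimize stub does nothing. Reconstructed from the hits; the long return type and the const qualifier on h are assumptions:

static inline long hugetlb_vmemmap_restore_folios(const struct hstate *h,
					struct list_head *folio_list,
					struct list_head *non_hvo_folios)
{
	/* no folio is HVO-optimized, so all are already "restored" */
	list_splice_init(folio_list, non_hvo_folios);
	return 0;
}

static inline void hugetlb_vmemmap_optimize_folios(struct hstate *h,
					struct list_head *folio_list)
{
	/* no-op when HVO is compiled out */
}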
/linux/mm/hugetlb_vmemmap.c
   503  * @folio_list: list of folios.
   511  * non-processed folios will remain on folio_list.
   514  struct list_head *folio_list,  in hugetlb_vmemmap_restore_folios() (argument)
   522  list_for_each_entry_safe(folio, t_folio, folio_list, lru) {  in hugetlb_vmemmap_restore_folios()
   648  void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)  in hugetlb_vmemmap_optimize_folios() (argument)
   654  list_for_each_entry(folio, folio_list, lru) {  in hugetlb_vmemmap_optimize_folios()
   669  list_for_each_entry(folio, folio_list, lru) {  in hugetlb_vmemmap_optimize_folios()
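The hits show why the two walks pick different iterators: the restore path (line 522) moves each successfully restored folio off folio_list mid-walk, so it needs list_for_each_entry_safe(); the optimize passes (lines 654 and 669) visit entries in place, so the plain iterator suffices. A sketch of the restore loop, assuming hugetlb_vmemmap_restore_folio() as the per-folio helper; the real function carries extra bookkeeping (TLB-flush batching, error codes) that is elided here:

static long restore_folios_sketch(const struct hstate *h,
				  struct list_head *folio_list,
				  struct list_head *non_hvo_folios)
{
	struct folio *folio, *t_folio;
	long restored = 0;

	/* _safe iterator: list_move() below edits the list under us */
	list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
		if (hugetlb_vmemmap_restore_folio(h, folio))
			break;	/* failure: the rest remain on folio_list */
		list_move(&folio->lru, non_hvo_folios);
		restored++;
	}
	return restored;
}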
/linux/mm/vmscan.c
   639  struct swap_iocb **plug, struct list_head *folio_list)  in pageout() (argument)
   693  wbc.list = folio_list;  in pageout()
  1082  static unsigned int shrink_folio_list(struct list_head *folio_list,  in shrink_folio_list() (argument)
  1100  while (!list_empty(folio_list)) {  in shrink_folio_list()
  1109  folio = lru_to_folio(folio_list);  in shrink_folio_list()
  1228  list_add_tail(&folio->lru, folio_list);  in shrink_folio_list()
  1279  split_folio_to_list(folio, folio_list))  in shrink_folio_list()
  1288  if (split_folio_to_list(folio, folio_list))  in shrink_folio_list()
  1402  switch (pageout(folio, mapping, &plug, folio_list)) {  in shrink_folio_list()
  1559  list_splice_init(&demote_folios, folio_list);  in shrink_folio_list()
  [all ...]
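The shrink_folio_list() hits outline its core control flow: pop folios off the tail of folio_list with lru_to_folio() (line 1109), handle each one, and collect folios that need another pass on side lists that are spliced back before returning (line 1559). A stripped-down sketch of that loop; try_to_reclaim_one() is a hypothetical stand-in for the hundreds of lines of real per-folio handling (writeback, splitting, demotion, unmapping):

static bool try_to_reclaim_one(struct folio *folio);	/* hypothetical */

static unsigned int shrink_folio_list_sketch(struct list_head *folio_list)
{
	LIST_HEAD(demote_folios);
	unsigned int nr_reclaimed = 0;

	while (!list_empty(folio_list)) {
		/* lru_to_folio() yields the entry at the tail of the list */
		struct folio *folio = lru_to_folio(folio_list);

		list_del(&folio->lru);
		if (try_to_reclaim_one(folio))
			nr_reclaimed += folio_nr_pages(folio);
		else
			/* keep it as a candidate for another pass */
			list_add(&folio->lru, &demote_folios);
	}
	/* hand unreclaimed folios back to the caller for retry */
	list_splice_init(&demote_folios, folio_list);
	return nr_reclaimed;
}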
/linux/mm/hugetlb.c
  1709  struct list_head *folio_list,  in bulk_vmemmap_restore_error() (argument)
  1741  list_for_each_entry_safe(folio, t_folio, folio_list, lru)  in bulk_vmemmap_restore_error()
  1760  struct list_head *folio_list)  in update_and_free_pages_bulk() (argument)
  1772  ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);  in update_and_free_pages_bulk()
  1774  bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);  in update_and_free_pages_bulk()
  1785  VM_WARN_ON(!list_empty(folio_list));  in update_and_free_pages_bulk()
  2035  struct list_head *folio_list)  in prep_and_add_allocated_folios() (argument)
  2041  hugetlb_vmemmap_optimize_folios(h, folio_list);  in prep_and_add_allocated_folios()
  2045  list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {  in prep_and_add_allocated_folios()
  3234  struct list_head *folio_list)  in prep_and_add_bootmem_folios() (argument)
  [all ...]
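The update_and_free_pages_bulk() hits sketch the bulk-free flow: attempt a batched vmemmap restore for the whole list, fall back to the error handler on failure, then free whatever landed on non_hvo_folios; the VM_WARN_ON at line 1785 documents the invariant that every folio must have been consumed. A hedged sketch of that shape; free_one_folio() is a hypothetical stand-in for the real freeing helper:

static void free_one_folio(struct hstate *h, struct folio *folio);	/* hypothetical */

static void bulk_free_sketch(struct hstate *h, struct list_head *folio_list)
{
	LIST_HEAD(non_hvo_folios);
	struct folio *folio, *t_folio;
	long ret;

	/* batched restore: successes move to non_hvo_folios */
	ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
	if (ret < 0)
		bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);

	list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
		list_del(&folio->lru);
		free_one_folio(h, folio);
	}
	/* every folio must have been moved off folio_list by now */
	VM_WARN_ON(!list_empty(folio_list));
}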
/linux/mm/madvise.c
   356  LIST_HEAD(folio_list);  in madvise_cold_or_pageout_pte_range()
   427  list_add(&folio->lru, &folio_list);  in madvise_cold_or_pageout_pte_range()
   434  reclaim_pages(&folio_list);  in madvise_cold_or_pageout_pte_range()
   547  list_add(&folio->lru, &folio_list);  in madvise_cold_or_pageout_pte_range()
   558  reclaim_pages(&folio_list);  in madvise_cold_or_pageout_pte_range()
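The madvise.c hits show the same batching idiom as paddr.c, driven from a page-table walk for MADV_PAGEOUT: folios found in the walked range are isolated onto a stack-local folio_list and reclaimed in one reclaim_pages() call per walked unit. A minimal sketch; lookup_folio_at() is a hypothetical stand-in for the real PTE-to-folio resolution:

static struct folio *lookup_folio_at(struct vm_area_struct *vma,
				     unsigned long addr);	/* hypothetical */

static void pageout_walk_sketch(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	LIST_HEAD(folio_list);
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct folio *folio = lookup_folio_at(vma, addr);

		if (!folio)
			continue;
		folio_clear_referenced(folio);
		if (folio_isolate_lru(folio))
			list_add(&folio->lru, &folio_list);
	}
	/* one batched reclaim call for the whole walked range */
	reclaim_pages(&folio_list);
}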
/linux/mm/internal.h
  1153  unsigned long reclaim_pages(struct list_head *folio_list);
  1155  struct list_head *folio_list);