| /linux/mm/ |
| H A D | swap_state.c |
|    410  struct folio *new_folio = NULL;  in __read_swap_cache_async() local
|
| H A D | memremap.c |
|    489  struct folio *new_folio = (struct folio *)new_page;  in zone_device_page_init() local
|    510  new_folio->mapping = NULL;  in zone_device_page_init()
|    511  new_folio->pgmap = pgmap; /* Also clear compound head */  in zone_device_page_init()
|    512  new_folio->share = 0; /* fsdax only, unused for device private */  in zone_device_page_init()
|    513  VM_WARN_ON_FOLIO(folio_ref_count(new_folio), new_folio);  in zone_device_page_init()
|    514  VM_WARN_ON_FOLIO(!folio_is_zone_device(new_folio), new_folio);  in zone_device_page_init()
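Pattern note: zone_device_page_init() resets a device page's folio state before handing it back out. A minimal sketch of that reset sequence, assuming a kernel context and using only the calls visible in the excerpt (the helper name reset_zone_device_folio is hypothetical):

    #include <linux/memremap.h>
    #include <linux/mm.h>
    #include <linux/mmdebug.h>

    /* Hypothetical helper mirroring the zone_device_page_init() excerpt. */
    static void reset_zone_device_folio(struct folio *new_folio,
                                        struct dev_pagemap *pgmap)
    {
            new_folio->mapping = NULL;  /* not in any address_space yet */
            new_folio->pgmap = pgmap;   /* also clears the compound head */
            new_folio->share = 0;       /* fsdax only, unused for device private */

            /* A fresh zone-device folio must be unreferenced and must
             * really live in ZONE_DEVICE. */
            VM_WARN_ON_FOLIO(folio_ref_count(new_folio), new_folio);
            VM_WARN_ON_FOLIO(!folio_is_zone_device(new_folio), new_folio);
    }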
|
| H A D | hugetlb.c |
|   2700  struct folio *new_folio = NULL;  in alloc_and_dissolve_hugetlb_folio() local
|   2737  if (!new_folio) {  in alloc_and_dissolve_hugetlb_folio()
|   2740  new_folio = alloc_fresh_hugetlb_folio(h, gfp_mask,  in alloc_and_dissolve_hugetlb_folio()
|   2742  if (!new_folio)  in alloc_and_dissolve_hugetlb_folio()
|   2760  account_new_hugetlb_folio(h, new_folio);  in alloc_and_dissolve_hugetlb_folio()
|   2761  enqueue_hugetlb_folio(h, new_folio);  in alloc_and_dissolve_hugetlb_folio()
|   2774  if (new_folio)  in alloc_and_dissolve_hugetlb_folio()
|   2775  update_and_free_hugetlb_folio(h, new_folio, false);  in alloc_and_dissolve_hugetlb_folio()
|   4007  struct folio *new_folio = (struct folio *)page;  in demote_free_hugetlb_folios() local
|   4012  new_folio->mapping = NULL;  in demote_free_hugetlb_folios()
|   [all …]
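Pattern note: alloc_and_dissolve_hugetlb_folio() allocates a fresh hugetlb folio, publishes it via account + enqueue, and frees it with update_and_free_hugetlb_folio() on failure paths. A condensed sketch, assuming mm/hugetlb.c context; the trailing arguments of alloc_fresh_hugetlb_folio() are cut off in the excerpt, so the node id and nodemask here are assumptions, as is the helper name:

    /* Hypothetical condensation of the allocate/publish shape above. */
    static int replace_with_fresh_folio(struct hstate *h, gfp_t gfp_mask,
                                        int nid, nodemask_t *nmask)
    {
            struct folio *new_folio;

            new_folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
            if (!new_folio)
                    return -ENOMEM;

            /* On errors after this point the original instead calls
             * update_and_free_hugetlb_folio(h, new_folio, false). */
            spin_lock_irq(&hugetlb_lock);           /* lock elided in the excerpt */
            account_new_hugetlb_folio(h, new_folio);
            enqueue_hugetlb_folio(h, new_folio);    /* now allocatable again */
            spin_unlock_irq(&hugetlb_lock);
            return 0;
    }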
|
| H A D | huge_memory.c |
|   3445  static void lru_add_split_folio(struct folio *folio, struct folio *new_folio,  in lru_add_split_folio() argument
|   3448  VM_BUG_ON_FOLIO(folio_test_lru(new_folio), folio);  in lru_add_split_folio()
|   3457  folio_get(new_folio);  in lru_add_split_folio()
|   3458  list_add_tail(&new_folio->lru, list);  in lru_add_split_folio()
|   3463  new_folio->mlock_count = 0;  in lru_add_split_folio()
|   3465  list_add_tail(&new_folio->lru, &folio->lru);  in lru_add_split_folio()
|   3466  folio_set_lru(new_folio);  in lru_add_split_folio()
|   3507  struct folio *new_folio = (struct folio *)new_head;  in __split_folio_to_order() local
|   3509  VM_BUG_ON_PAGE(atomic_read(&new_folio->_mapcount) != -1, new_head);  in __split_folio_to_order()
|   3524  new_folio->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP;  in __split_folio_to_order()
|   [all …]
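Pattern note: lru_add_split_folio() decides where each folio produced by a THP split goes: onto the caller's isolation list (with an extra reference) or next to the original folio on the LRU. A sketch of that branch; the unevictable check is an assumption inferred from lines 3463/3465, whose surrounding context is elided in the excerpt, and the helper name is hypothetical:

    /* Sketch of post-split LRU placement. */
    static void place_split_folio(struct folio *folio, struct folio *new_folio,
                                  struct list_head *list)
    {
            VM_BUG_ON_FOLIO(folio_test_lru(new_folio), folio);

            if (list) {
                    /* Original was isolated: hand new_folio to the caller. */
                    folio_get(new_folio);
                    list_add_tail(&new_folio->lru, list);
            } else {
                    /* Assumption: mlock_count reset vs. LRU placement is
                     * an unevictable check, as lines 3463/3465 suggest. */
                    if (folio_test_unevictable(folio))
                            new_folio->mlock_count = 0;
                    else
                            list_add_tail(&new_folio->lru, &folio->lru);
                    folio_set_lru(new_folio);
            }
    }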
|
| H A D | khugepaged.c |
|   1854  struct folio *folio, *tmp, *new_folio;  in collapse_file() local
|   1865  result = alloc_charge_folio(&new_folio, mm, cc);  in collapse_file()
|   1871  __folio_set_locked(new_folio);  in collapse_file()
|   1873  __folio_set_swapbacked(new_folio);  in collapse_file()
|   1874  new_folio->index = start;  in collapse_file()
|   1875  new_folio->mapping = mapping;  in collapse_file()
|   2104  dst = folio_page(new_folio, 0);  in collapse_file()
|   2190  lruvec_stat_mod_folio(new_folio, NR_SHMEM, HPAGE_PMD_NR);  in collapse_file()
|   2191  lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);  in collapse_file()
|   2193  lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);  in collapse_file()
|   [all …]
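Pattern note: collapse_file() allocates and memcg-charges the target hugepage folio first (alloc_charge_folio() is local to mm/khugepaged.c), then initialises it with the non-atomic setters while it is still invisible to other CPUs. A sketch of that initialisation step, with a hypothetical helper name:

    /* Sketch of the new_folio setup in collapse_file(). */
    static void prepare_collapse_folio(struct folio *new_folio,
                                       struct address_space *mapping,
                                       pgoff_t start, bool is_shmem)
    {
            /* Folio is not yet published, so __folio_set_*() are safe. */
            __folio_set_locked(new_folio);
            if (is_shmem)
                    __folio_set_swapbacked(new_folio);
            new_folio->index = start;
            new_folio->mapping = mapping;
    }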
|
| H A D | memory.c |
|   1064  struct folio *new_folio;  in copy_present_page() local
|   1067  new_folio = *prealloc;  in copy_present_page()
|   1068  if (!new_folio)  in copy_present_page()
|   1076  if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma))  in copy_present_page()
|   1080  __folio_mark_uptodate(new_folio);  in copy_present_page()
|   1081  folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);  in copy_present_page()
|   1082  folio_add_lru_vma(new_folio, dst_vma);  in copy_present_page()
|   1086  pte = folio_mk_pte(new_folio, dst_vma->vm_page_prot);  in copy_present_page()
|   1201  struct folio *new_folio;  in folio_prealloc() local
|   1204  new_folio = vma_alloc_zeroed_movable_folio(vma, addr);  in folio_prealloc()
|   [all …]
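Pattern note: copy_present_page() performs the fork-time COW copy: a machine-check-aware copy into the preallocated folio, then rmap/LRU publication and PTE construction. A sketch using only the excerpt's calls; the -EHWPOISON return value and the helper name are assumptions:

    /* Sketch of the copy-and-publish step in copy_present_page(). */
    static int cow_copy_publish(struct folio *new_folio, struct page *src_page,
                                struct vm_area_struct *src_vma,
                                struct vm_area_struct *dst_vma,
                                unsigned long addr, pte_t *pteval)
    {
            /* Fails instead of oopsing if the source page is poisoned. */
            if (copy_mc_user_highpage(&new_folio->page, src_page, addr, src_vma))
                    return -EHWPOISON;      /* assumed error value */

            __folio_mark_uptodate(new_folio);
            /* Brand-new anon folio, exclusive to the child mapping. */
            folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);
            folio_add_lru_vma(new_folio, dst_vma);

            *pteval = folio_mk_pte(new_folio, dst_vma->vm_page_prot);
            return 0;
    }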
|
| H A D | ksm.c |
|   3109  struct folio *new_folio;  in ksm_might_need_to_copy() local
|   3129  new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);  in ksm_might_need_to_copy()
|   3130  if (new_folio &&  in ksm_might_need_to_copy()
|   3131  mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) {  in ksm_might_need_to_copy()
|   3132  folio_put(new_folio);  in ksm_might_need_to_copy()
|   3133  new_folio = NULL;  in ksm_might_need_to_copy()
|   3135  if (new_folio) {  in ksm_might_need_to_copy()
|   3136  if (copy_mc_user_highpage(folio_page(new_folio, 0), page,  in ksm_might_need_to_copy()
|   3138  folio_put(new_folio);  in ksm_might_need_to_copy()
|   3141  folio_set_dirty(new_folio);  in ksm_might_need_to_copy()
|   [all …]
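Pattern note: ksm_might_need_to_copy() shows the canonical allocate, charge, copy sequence: a folio freshly allocated with vma_alloc_folio() must be charged to the memcg before use, and both the charge and the machine-check-aware copy have failure paths that drop the folio. A sketch; the helper name and the ERR_PTR(-EHWPOISON) convention are assumptions:

    #include <linux/err.h>

    /* Sketch of the alloc/charge/copy shape in ksm_might_need_to_copy(). */
    static struct folio *alloc_and_copy_folio(struct page *page,
                                              struct vm_area_struct *vma,
                                              unsigned long addr)
    {
            struct folio *new_folio;

            new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);
            if (new_folio &&
                mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) {
                    folio_put(new_folio);   /* memcg charge failed */
                    new_folio = NULL;
            }
            if (new_folio) {
                    if (copy_mc_user_highpage(folio_page(new_folio, 0), page,
                                              addr, vma)) {
                            folio_put(new_folio);   /* source was poisoned */
                            return ERR_PTR(-EHWPOISON);
                    }
                    folio_set_dirty(new_folio);
            }
            return new_folio;       /* NULL: allocation or charge failed */
    }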
|
| H A D | hugetlb_cgroup.c |
|    888  void hugetlb_cgroup_migrate(struct folio *old_folio, struct folio *new_folio)  in hugetlb_cgroup_migrate() argument
|    904  set_hugetlb_cgroup(new_folio, h_cg);  in hugetlb_cgroup_migrate()
|    905  set_hugetlb_cgroup_rsvd(new_folio, h_cg_rsvd);  in hugetlb_cgroup_migrate()
|    906  list_move(&new_folio->lru, &h->hugepage_activelist);  in hugetlb_cgroup_migrate()
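Pattern note: hugetlb_cgroup_migrate() transfers both the regular and the reservation cgroup pointers to the replacement folio and moves it onto the hstate's active list. A sketch using only the excerpt's calls; h, h_cg and h_cg_rsvd come from elided lines, and the helper name is hypothetical:

    /* Sketch of the cgroup handoff in hugetlb_cgroup_migrate(). */
    static void transfer_hugetlb_cgroup(struct hstate *h, struct folio *new_folio,
                                        struct hugetlb_cgroup *h_cg,
                                        struct hugetlb_cgroup *h_cg_rsvd)
    {
            set_hugetlb_cgroup(new_folio, h_cg);
            set_hugetlb_cgroup_rsvd(new_folio, h_cg_rsvd);
            /* Replacement takes the old folio's place on the active list. */
            list_move(&new_folio->lru, &h->hugepage_activelist);
    }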
|
| /linux/include/linux/ |
| H A D | memremap.h |
|    248  struct folio *new_folio)  in zone_device_private_split_cb() argument
|    252  if (new_folio) {  in zone_device_private_split_cb()
|    253  new_folio->pgmap = original_folio->pgmap;  in zone_device_private_split_cb()
|    254  new_folio->page.mapping =  in zone_device_private_split_cb()
|    259  new_folio);  in zone_device_private_split_cb()
|    299  struct folio *new_folio)  in zone_device_private_split_cb() argument
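Pattern note: the inline zone_device_private_split_cb() makes each folio produced by splitting a device-private folio inherit the original's pgmap and mapping before the driver's split callback (only its final argument is visible, at line 259) is invoked. A sketch of the inheritance step, with a hypothetical helper name:

    /* Sketch; the driver callback invocation is elided here as it is in
     * the excerpt. */
    static inline void inherit_device_private_state(struct folio *original_folio,
                                                    struct folio *new_folio)
    {
            if (new_folio) {
                    new_folio->pgmap = original_folio->pgmap;
                    new_folio->page.mapping = original_folio->page.mapping;
            }
    }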
|
| H A D | hugetlb_cgroup.h |
|    157  struct folio *new_folio);
|    266  struct folio *new_folio)  in hugetlb_cgroup_migrate() argument
|
| H A D | hugetlb.h |
|    159  void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
|    436  struct folio *new_folio, int reason)  in move_hugetlb_state() argument
|
| /linux/include/trace/events/ |
| H A D | huge_memory.h |
|    203  TP_PROTO(struct mm_struct *mm, struct folio *new_folio, pgoff_t index,
|    206  TP_ARGS(mm, new_folio, index, addr, is_shmem, file, nr, result),
|    220  __entry->hpfn = new_folio ? folio_pfn(new_folio) : -1;
|
| /linux/fs/minix/ |
| H A D | namei.c |
|    218  struct folio *new_folio;  in minix_rename() local
|    237  new_de = minix_find_entry(new_dentry, &new_folio);  in minix_rename()
|    240  err = minix_set_link(new_de, new_folio, old_inode);  in minix_rename()
|    241  folio_release_kmap(new_folio, new_de);  in minix_rename()
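Pattern note: when a rename overwrites an existing target, minix_rename() looks the entry up (returned kmapped, with a folio reference held), repoints it at old_inode, and releases the mapping and the reference together with folio_release_kmap(). A sketch, with a hypothetical helper name and an assumed error value:

    /* Sketch of the target-overwrite path in minix_rename(). */
    static int overwrite_target(struct dentry *new_dentry, struct inode *old_inode)
    {
            struct folio *new_folio;
            struct minix_dir_entry *new_de;
            int err;

            new_de = minix_find_entry(new_dentry, &new_folio);
            if (!new_de)
                    return -ENOENT;         /* assumed error value */
            err = minix_set_link(new_de, new_folio, old_inode);
            folio_release_kmap(new_folio, new_de);  /* kunmap + folio_put */
            return err;
    }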
|
| /linux/fs/jbd2/ |
| H A D | journal.c |
|    332  struct folio *new_folio;  in jbd2_journal_write_metadata_buffer() local
|    359  new_folio = virt_to_folio(jh_in->b_frozen_data);  in jbd2_journal_write_metadata_buffer()
|    360  new_offset = offset_in_folio(new_folio, jh_in->b_frozen_data);  in jbd2_journal_write_metadata_buffer()
|    368  new_folio = bh_in->b_folio;  in jbd2_journal_write_metadata_buffer()
|    369  new_offset = offset_in_folio(new_folio, bh_in->b_data);  in jbd2_journal_write_metadata_buffer()
|    370  mapped_data = kmap_local_folio(new_folio, new_offset);  in jbd2_journal_write_metadata_buffer()
|    396  memcpy_from_folio(tmp, new_folio, new_offset, bh_in->b_size);  in jbd2_journal_write_metadata_buffer()
|    405  new_folio = virt_to_folio(jh_in->b_frozen_data);  in jbd2_journal_write_metadata_buffer()
|    406  new_offset = offset_in_folio(new_folio, jh_in->b_frozen_data);  in jbd2_journal_write_metadata_buffer()
|    411  folio_set_bh(new_bh, new_folio, new_offset);  in jbd2_journal_write_metadata_buffer()
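Pattern note: jbd2_journal_write_metadata_buffer() writes either from b_frozen_data (a copied, possibly escaped buffer; lines 359-360 and 405-406) or straight from the live buffer_head's folio (lines 368-369), and finally binds the output bh to whichever folio/offset was chosen (line 411). A sketch of the source selection, with a hypothetical helper name:

    /* Sketch: pick the folio and offset the journal block is written from. */
    static void pick_metadata_source(struct journal_head *jh_in,
                                     struct buffer_head *bh_in,
                                     struct folio **foliop, size_t *offsetp)
    {
            if (jh_in->b_frozen_data) {
                    /* Buffer was copied (e.g. escaped magic); use the copy. */
                    *foliop = virt_to_folio(jh_in->b_frozen_data);
                    *offsetp = offset_in_folio(*foliop, jh_in->b_frozen_data);
            } else {
                    /* Write directly out of the live buffer's folio. */
                    *foliop = bh_in->b_folio;
                    *offsetp = offset_in_folio(*foliop, bh_in->b_data);
            }
    }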
|
| /linux/fs/ |
| H A D | dax.c |
|    403  struct folio *new_folio = (struct folio *)page;  in dax_folio_put() local
|    408  new_folio->mapping = NULL;  in dax_folio_put()
|    413  new_folio->pgmap = pgmap;  in dax_folio_put()
|    414  new_folio->share = 0;  in dax_folio_put()
|    415  WARN_ON_ONCE(folio_ref_count(new_folio));  in dax_folio_put()
|