/linux/mm/
page_vma_mapped_walk.c
   10  static inline bool not_found(struct page_vma_mapped_walk *pvmw)  in not_found() argument
   12  page_vma_mapped_walk_done(pvmw);  in not_found()
   16  static bool map_pte(struct page_vma_mapped_walk *pvmw, pmd_t *pmdvalp,  in map_pte() argument
   22  if (pvmw->flags & PVMW_SYNC) {  in map_pte()
   24  pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,  in map_pte()
   25  pvmw->address, &pvmw->ptl);  in map_pte()
   26  *ptlp = pvmw->ptl;  in map_pte()
   27  return !!pvmw->pte;  in map_pte()
   30  is_migration = pvmw->flags & PVMW_MIGRATION;  in map_pte()
   39  pvmw->pte = pte_offset_map_rw_nolock(pvmw->vma->vm_mm, pvmw->pmd,  in map_pte()
  [all …]
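The map_pte() matches above show the walker's two mapping modes: with PVMW_SYNC the PTE is mapped and locked immediately via pte_offset_map_lock(), otherwise a lockless map is taken first and the lock is only acquired once a matching entry is found. A hypothetical caller that wants the synchronous, migration-entry-matching behaviour (the same flag combination visible in the migrate.c entry further down) could be set up roughly like this, assuming folio, vma and addr are already in scope:

/*
 * Sketch only: PVMW_SYNC asks map_pte() to take the PTE lock up front,
 * PVMW_MIGRATION asks the walk to match migration entries rather than
 * present PTEs.
 */
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

while (page_vma_mapped_walk(&pvmw)) {
	/* pvmw.pte (or pvmw.pmd) is mapped and pvmw.ptl is held here. */
}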
|
rmap.c
  828  DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);  in folio_referenced_one()
  831  while (page_vma_mapped_walk(&pvmw)) {  in folio_referenced_one()
  832  address = pvmw.address;  in folio_referenced_one()
  839  if (pvmw.pte && ptes != pvmw.nr_pages)  in folio_referenced_one()
  849  if (pvmw.flags & PVMW_PGTABLE_CROSSED)  in folio_referenced_one()
  854  page_vma_mapped_walk_done(&pvmw);  in folio_referenced_one()
  869  page_vma_mapped_walk_done(&pvmw);  in folio_referenced_one()
  873  if (lru_gen_enabled() && pvmw.pte) {  in folio_referenced_one()
  874  if (lru_gen_look_around(&pvmw))  in folio_referenced_one()
  876  } else if (pvmw.pte) {  in folio_referenced_one()
  [all …]
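The folio_referenced_one() matches show the canonical shape of a pvmw-based rmap callback: loop over page_vma_mapped_walk(), bail out early with page_vma_mapped_walk_done() when the walk must stop, and dispatch the actual reference test per mapping. A minimal sketch of that dispatch, mirroring lines 873-876 above; "referenced" is an assumed local counter, and the accessed-bit helper in the fallback branch is the one visible in the page_idle.c entry below, not necessarily the exact primitive rmap.c uses:

/* Fragment from inside the walk loop. */
if (lru_gen_enabled() && pvmw.pte) {
	/* MGLRU: let look-around scan neighbouring PTEs as well. */
	if (lru_gen_look_around(&pvmw))
		referenced++;
} else if (pvmw.pte) {
	/* Classic path: test and clear the accessed bit on this PTE. */
	if (ptep_test_and_clear_young(vma, pvmw.address, pvmw.pte))
		referenced++;
}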
|
page_idle.c
   56  DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);  in page_idle_clear_pte_refs_one()
   59  while (page_vma_mapped_walk(&pvmw)) {  in page_idle_clear_pte_refs_one()
   60  addr = pvmw.address;  in page_idle_clear_pte_refs_one()
   61  if (pvmw.pte) {  in page_idle_clear_pte_refs_one()
   70  if (likely(pte_present(ptep_get(pvmw.pte))))  in page_idle_clear_pte_refs_one()
   71  referenced |= ptep_test_and_clear_young(vma, addr, pvmw.pte);  in page_idle_clear_pte_refs_one()
   74  pmd_t pmdval = pmdp_get(pvmw.pmd);  in page_idle_clear_pte_refs_one()
   77  referenced |= pmdp_clear_young_notify(vma, addr, pvmw.pmd);  in page_idle_clear_pte_refs_one()
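Pieced together, the page_idle callback is a compact illustration of the whole API: declare the walk with DEFINE_FOLIO_VMA_WALK, iterate over every place the folio is mapped in this VMA, and clear the accessed bit at PTE or PMD granularity depending on which level the walk stopped at. A minimal sketch built only from the matches above; the lines the search skipped are filled with straightforward glue and a hypothetical function name, not the kernel's actual code:

#include <linux/rmap.h>
#include <linux/pgtable.h>
#include <linux/mmu_notifier.h>

/* Sketch of a page_idle-style "clear references" rmap callback. */
static bool my_clear_refs_one(struct folio *folio, struct vm_area_struct *vma,
			      unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
	bool referenced = false;

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			/* Mapped by a normal PTE at this address. */
			if (likely(pte_present(ptep_get(pvmw.pte))))
				referenced |= ptep_test_and_clear_young(vma, addr,
									pvmw.pte);
		} else {
			/* Mapped by a huge PMD (THP). */
			referenced |= pmdp_clear_young_notify(vma, addr, pvmw.pmd);
		}
	}

	if (referenced)
		*(bool *)arg = true;	/* report back through the walk argument */

	return true;	/* keep the rmap walk going over other VMAs */
}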
|
migrate.c
  298  static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,  in try_to_map_unused_to_zeropage() argument
  312  if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||  in try_to_map_unused_to_zeropage()
  313  mm_forbids_zeropage(pvmw->vma->vm_mm))  in try_to_map_unused_to_zeropage()
  324  newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),  in try_to_map_unused_to_zeropage()
  325  pvmw->vma->vm_page_prot));  in try_to_map_unused_to_zeropage()
  332  set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);  in try_to_map_unused_to_zeropage()
  334  dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));  in try_to_map_unused_to_zeropage()
  350  DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION);  in remove_migration_pte()
  352  while (page_vma_mapped_walk(&pvmw)) {  in remove_migration_pte()
  362  idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;  in remove_migration_pte()
  [all …]
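try_to_map_unused_to_zeropage() lets migration drop a mapping of a page that turned out to be unused (all zeroes) and point it at the shared zero page instead of moving it. The essential PTE construction, reassembled from lines 312-334 above into a hypothetical helper; the content check, old-PTE teardown, TLB flushing and statistics that the real function performs around this are omitted:

/* Sketch: map the shared zero page at pvmw->address instead of a real page. */
static bool my_map_zeropage(struct page_vma_mapped_walk *pvmw, struct folio *folio)
{
	pte_t newpte;

	/* mlocked mappings and mms that forbid the zero page are left alone */
	if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
	    mm_forbids_zeropage(pvmw->vma->vm_mm))
		return false;

	/* "special" PTE: points at the zero page, no struct page refcounting */
	newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
				       pvmw->vma->vm_page_prot));
	set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);

	/* the folio no longer backs this address, so drop its mm counter */
	dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
	return true;
}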
|
ksm.c
 1276  DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0);  in write_protect_page()
 1286  pvmw.address = page_address_in_vma(folio, folio_page(folio, 0), vma);  in write_protect_page()
 1287  if (pvmw.address == -EFAULT)  in write_protect_page()
 1290  mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,  in write_protect_page()
 1291  pvmw.address + PAGE_SIZE);  in write_protect_page()
 1294  if (!page_vma_mapped_walk(&pvmw))  in write_protect_page()
 1296  if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))  in write_protect_page()
 1299  entry = ptep_get(pvmw.pte);  in write_protect_page()
 1311  flush_cache_page(vma, pvmw.address, folio_pfn(folio));  in write_protect_page()
 1326  entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);  in write_protect_page()
  [all …]
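KSM's write_protect_page() shows the single-shot use of the walker: resolve the folio's address in this VMA after declaring the walk, announce the upcoming PTE change with an MMU-notifier CLEAR range, take exactly one page_vma_mapped_walk() step, and operate on the one PTE it finds. A sketch of that skeleton with the actual write-protection logic reduced to a placeholder comment; the usual mm headers are assumed and the error handling is simplified:

static int my_write_protect_one(struct mm_struct *mm, struct vm_area_struct *vma,
				struct folio *folio)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0);
	struct mmu_notifier_range range;
	int err = -EFAULT;

	/* the walk was declared with address 0: fill in the real address */
	pvmw.address = page_address_in_vma(folio, folio_page(folio, 0), vma);
	if (pvmw.address == -EFAULT)
		return err;

	/* tell MMU-notifier listeners that one PTE is about to change */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				pvmw.address, pvmw.address + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	if (!page_vma_mapped_walk(&pvmw))
		goto out;
	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
		goto out_unlock;

	/* pvmw.pte is mapped and pvmw.ptl is held: safe to rewrite the PTE */
	flush_cache_page(vma, pvmw.address, folio_pfn(folio));
	/* ... ptep_clear_flush() the old PTE and install a write-protected one ... */
	err = 0;

out_unlock:
	page_vma_mapped_walk_done(&pvmw);
out:
	mmu_notifier_invalidate_range_end(&range);
	return err;
}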
|
huge_memory.c
 4862  int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,  in set_pmd_migration_entry() argument
 4866  struct vm_area_struct *vma = pvmw->vma;  in set_pmd_migration_entry()
 4868  unsigned long address = pvmw->address;  in set_pmd_migration_entry()
 4874  if (!(pvmw->pmd && !pvmw->pte))  in set_pmd_migration_entry()
 4878  if (unlikely(!pmd_present(*pvmw->pmd)))  in set_pmd_migration_entry()
 4879  pmdval = pmdp_huge_get_and_clear(vma->vm_mm, address, pvmw->pmd);  in set_pmd_migration_entry()
 4881  pmdval = pmdp_invalidate(vma, address, pvmw->pmd);  in set_pmd_migration_entry()
 4886  set_pmd_at(mm, address, pvmw->pmd, pmdval);  in set_pmd_migration_entry()
 4907  set_pmd_at(mm, address, pvmw->pmd, pmdswp);  in set_pmd_migration_entry()
 4915  void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)  in remove_migration_pmd() argument
  [all …]
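The check at line 4874 is the key precondition: a PMD migration entry can only be installed where the walk stopped at a huge PMD, meaning pvmw->pmd is set while pvmw->pte is not; a PTE-level hit means the THP is (or has been split into) base pages and has to be handled per-PTE instead. Spelled out as a fragment:

/* Fragment: only act when the walk stopped at a huge PMD. */
if (!(pvmw->pmd && !pvmw->pte))
	return 0;	/* not a PMD-mapped THP here, nothing to do */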
|
internal.h
 1103  static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)  in vma_address_end() argument
 1105  struct vm_area_struct *vma = pvmw->vma;  in vma_address_end()
 1110  if (pvmw->nr_pages == 1)  in vma_address_end()
 1111  return pvmw->address + PAGE_SIZE;  in vma_address_end()
 1113  pgoff = pvmw->pgoff + pvmw->nr_pages;  in vma_address_end()
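vma_address_end() returns the first address past the folio's mapping in this VMA: for a single page that is just pvmw->address + PAGE_SIZE, otherwise the end page offset (pvmw->pgoff + pvmw->nr_pages) has to be translated back into a virtual address. A sketch of how that translation would typically be completed; the clamping at the end is an assumption, not copied from the kernel:

/* Sketch: translate the folio's end page offset into a user address,
 * never reporting an end beyond the VMA itself. */
static inline unsigned long my_vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (address < vma->vm_start || address > vma->vm_end)	/* assumption */
		address = vma->vm_end;
	return address;
}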
|
migrate_device.c
  203  struct page_vma_mapped_walk pvmw = {  in migrate_vma_collect_huge_pmd() local
  217  ret = set_pmd_migration_entry(&pvmw, folio_page(folio, 0));  in migrate_vma_collect_huge_pmd()
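Here the walk state is not produced by page_vma_mapped_walk() at all: migrate_vma_collect_huge_pmd() is already parked on a huge PMD with its lock held, so it fills in a struct page_vma_mapped_walk by hand and passes it straight to set_pmd_migration_entry(). A hedged sketch of such an on-the-spot initializer; the exact field choice is an assumption based on the fields used elsewhere in this listing, and folio, vma, addr, pmdp and ptl are assumed to be in scope:

/* Sketch: describe an already-located huge-PMD mapping without running the walker. */
struct page_vma_mapped_walk pvmw = {
	.pfn		= folio_pfn(folio),
	.nr_pages	= folio_nr_pages(folio),
	.pgoff		= folio->index,
	.vma		= vma,
	.address	= addr,
	.pmd		= pmdp,		/* the huge PMD we are sitting on */
	.ptl		= ptl,		/* its page table lock, already held */
};
int ret = set_pmd_migration_entry(&pvmw, folio_page(folio, 0));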
|
vmscan.c
 4194  bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)  in lru_gen_look_around() argument
 4203  pte_t *pte = pvmw->pte;  in lru_gen_look_around()
 4204  unsigned long addr = pvmw->address;  in lru_gen_look_around()
 4205  struct vm_area_struct *vma = pvmw->vma;  in lru_gen_look_around()
 4206  struct folio *folio = pfn_folio(pvmw->pfn);  in lru_gen_look_around()
 4214  lockdep_assert_held(pvmw->ptl);  in lru_gen_look_around()
 4220  if (spin_is_contended(pvmw->ptl))  in lru_gen_look_around()
 4285  update_bloom_filter(mm_state, max_seq, pvmw->pmd);  in lru_gen_look_around()
|
/linux/mm/damon/
ops-common.c
  159  DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);  in damon_folio_mkold_one()
  161  while (page_vma_mapped_walk(&pvmw)) {  in damon_folio_mkold_one()
  162  addr = pvmw.address;  in damon_folio_mkold_one()
  163  if (pvmw.pte)  in damon_folio_mkold_one()
  164  damon_ptep_mkold(pvmw.pte, vma, addr);  in damon_folio_mkold_one()
  166  damon_pmdp_mkold(pvmw.pmd, vma, addr);  in damon_folio_mkold_one()
  195  DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);  in damon_folio_young_one()
  199  while (page_vma_mapped_walk(&pvmw)) {  in damon_folio_young_one()
  200  addr = pvmw.address;  in damon_folio_young_one()
  201  if (pvmw.pte) {  in damon_folio_young_one()
  [all …]
|
/linux/include/linux/
rmap.h
  952  static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)  in page_vma_mapped_walk_done() argument
  955  if (pvmw->pte && !is_vm_hugetlb_page(pvmw->vma))  in page_vma_mapped_walk_done()
  956  pte_unmap(pvmw->pte);  in page_vma_mapped_walk_done()
  957  if (pvmw->ptl)  in page_vma_mapped_walk_done()
  958  spin_unlock(pvmw->ptl);  in page_vma_mapped_walk_done()
  971  page_vma_mapped_walk_restart(struct page_vma_mapped_walk *pvmw)  in page_vma_mapped_walk_restart() argument
  973  WARN_ON_ONCE(!pvmw->pmd && !pvmw->pte);  in page_vma_mapped_walk_restart()
  975  if (likely(pvmw->ptl))  in page_vma_mapped_walk_restart()
  976  spin_unlock(pvmw->ptl);  in page_vma_mapped_walk_restart()
  980  pvmw->ptl = NULL;  in page_vma_mapped_walk_restart()
  [all …]
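The two helpers differ in what happens next: page_vma_mapped_walk_done() finishes the walk (unmap the PTE, drop the page table lock), while page_vma_mapped_walk_restart() drops the lock and clears the cached state so that the next page_vma_mapped_walk() call re-examines the current position. The restart variant is what a caller wants after it has changed the page table underneath the walk, for example after splitting a huge PMD. A hypothetical fragment from inside the loop; my_split_huge_pmd() is a stand-in for whatever rewrote the entry:

while (page_vma_mapped_walk(&pvmw)) {
	if (!pvmw.pte) {
		/* we rewrote the huge PMD; look at the same range again as PTEs */
		my_split_huge_pmd(vma, pvmw.pmd, pvmw.address);
		page_vma_mapped_walk_restart(&pvmw);
		continue;
	}
	/* ... normal per-PTE handling ... */
}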
|
swapops.h
  325  extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
  328  extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
  342  static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,  in set_pmd_migration_entry() argument
  348  static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,  in remove_migration_pmd() argument
|
mmzone.h
  619  bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
  638  static inline bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)  in lru_gen_look_around() argument
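As with the swapops.h entry above, the header exports the real prototype only when the feature is built in and a trivial inline stub otherwise, so callers such as folio_referenced_one() need no #ifdef of their own. A sketch of that declaration/stub split as suggested by lines 619 and 638; the stub body is an assumption:

#ifdef CONFIG_LRU_GEN
bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
#else
static inline bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
{
	return false;	/* assumption: no look-around without MGLRU */
}
#endif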
|