
Searched refs:pvmw (Results 1 – 7 of 7) sorted by relevance

/linux/mm/damon/
paddr.c
25 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0); in damon_folio_mkold_one()
27 while (page_vma_mapped_walk(&pvmw)) { in damon_folio_mkold_one()
28 addr = pvmw.address; in damon_folio_mkold_one()
29 if (pvmw.pte) in damon_folio_mkold_one()
30 damon_ptep_mkold(pvmw.pte, vma, addr); in damon_folio_mkold_one()
32 damon_pmdp_mkold(pvmw.pmd, vma, addr); in damon_folio_mkold_one()
94 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0); in damon_folio_young_one()
97 while (page_vma_mapped_walk(&pvmw)) { in damon_folio_young_one()
98 addr = pvmw.address; in damon_folio_young_one()
99 if (pvmw… in damon_folio_young_one()
[all …]
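Both DAMON callbacks above share one pattern: DEFINE_FOLIO_VMA_WALK() seeds a struct page_vma_mapped_walk, and each successful page_vma_mapped_walk() step yields either a PTE (pvmw.pte) or a huge-PMD (pvmw.pmd) mapping of the folio, with pvmw.address giving the matched address. A minimal sketch of that pattern, reconstructed from the excerpt rather than copied verbatim from mm/damon/paddr.c; the callback signature is that of rmap_walk_control->rmap_one:

    static bool folio_mkold_one(struct folio *folio,
                                struct vm_area_struct *vma,
                                unsigned long addr, void *arg)
    {
        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

        while (page_vma_mapped_walk(&pvmw)) {
            addr = pvmw.address;
            if (pvmw.pte)
                /* base-page mapping: clear Accessed in the PTE */
                damon_ptep_mkold(pvmw.pte, vma, addr);
            else
                /* THP mapping: age the huge PMD instead */
                damon_pmdp_mkold(pvmw.pmd, vma, addr);
        }
        return true;    /* continue the rmap walk over other VMAs */
    }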
/linux/mm/
page_idle.c
56 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0); in page_idle_clear_pte_refs_one()
59 while (page_vma_mapped_walk(&pvmw)) { in page_idle_clear_pte_refs_one()
60 addr = pvmw.address; in page_idle_clear_pte_refs_one()
61 if (pvmw.pte) { in page_idle_clear_pte_refs_one()
66 if (ptep_clear_young_notify(vma, addr, pvmw.pte)) in page_idle_clear_pte_refs_one()
69 if (pmdp_clear_young_notify(vma, addr, pvmw.pmd)) in page_idle_clear_pte_refs_one()
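page_idle_clear_pte_refs_one() runs the same walk, but clears the accessed state through the MMU-notifier-aware helpers so secondary MMUs (e.g. KVM) are aged along with the page tables. A hedged sketch of the loop, with the real function's bookkeeping reduced to the folio_clear_idle()/folio_set_young() pair:

    static bool idle_clear_refs_one(struct folio *folio,
                                    struct vm_area_struct *vma,
                                    unsigned long addr, void *arg)
    {
        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
        bool referenced = false;

        while (page_vma_mapped_walk(&pvmw)) {
            addr = pvmw.address;
            if (pvmw.pte) {
                /* clear Accessed in the PTE and in secondary MMUs */
                if (ptep_clear_young_notify(vma, addr, pvmw.pte))
                    referenced = true;
            } else if (pmdp_clear_young_notify(vma, addr, pvmw.pmd)) {
                referenced = true;
            }
        }

        if (referenced) {
            folio_clear_idle(folio);
            folio_set_young(folio);    /* seen since last idle scan */
        }
        return true;
    }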
migrate.c
192 static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw, in try_to_map_unused_to_zeropage() argument
205 VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page); in try_to_map_unused_to_zeropage()
207 if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) || in try_to_map_unused_to_zeropage()
208 mm_forbids_zeropage(pvmw->vma->vm_mm)) in try_to_map_unused_to_zeropage()
223 newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address), in try_to_map_unused_to_zeropage()
224 pvmw->vma->vm_page_prot)); in try_to_map_unused_to_zeropage()
225 set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte); in try_to_map_unused_to_zeropage()
227 dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio)); in try_to_map_unused_to_zeropage()
243 DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION); in remove_migration_pte()
245 while (page_vma_mapped_walk(&pvmw)) { in remove_migration_pte()
[all …]
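migrate.c shows two distinct uses: try_to_map_unused_to_zeropage() operates on a walk position handed in by its caller, while remove_migration_pte() runs its own walk with PVMW_SYNC | PVMW_MIGRATION so it matches migration entries rather than present PTEs. A sketch of the zeropage shortcut, filling in the lines elided above; the real function additionally verifies that the page really contains only zeroes before taking this path:

    static bool map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
                                       struct folio *folio, unsigned long idx)
    {
        struct page *page = folio_page(folio, idx);
        pte_t newpte;

        /* the walk matched a migration entry, so no present PTE here */
        VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page);

        if (folio_test_mlocked(folio) ||
            (pvmw->vma->vm_flags & VM_LOCKED) ||
            mm_forbids_zeropage(pvmw->vma->vm_mm))
            return false;    /* zeropage substitution not allowed here */

        /* map the shared zero page, read-only and special */
        newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
                                       pvmw->vma->vm_page_prot));
        set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);

        /* the anon page is gone from this mm: drop its RSS counter */
        dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
        return true;
    }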
internal.h
1000 static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw) in vma_address_end() argument
1002 struct vm_area_struct *vma = pvmw->vma; in vma_address_end()
1007 if (pvmw->nr_pages == 1) in vma_address_end()
1008 return pvmw->address + PAGE_SIZE; in vma_address_end()
1010 pgoff = pvmw->pgoff + pvmw->nr_pages; in vma_address_end()
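The excerpt stops after the pgoff computation. vma_address_end() returns the first address past the folio's mapping within this VMA, so multi-page walks know where to stop; a sketch of the full helper, assuming the clamping done upstream:

    static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
    {
        struct vm_area_struct *vma = pvmw->vma;
        pgoff_t pgoff;
        unsigned long address;

        /* common single-page case (->pgoff is not meaningful for KSM) */
        if (pvmw->nr_pages == 1)
            return pvmw->address + PAGE_SIZE;

        /* translate the file offset just past the folio into an address */
        pgoff = pvmw->pgoff + pvmw->nr_pages;
        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

        /* clamp to the VMA end if the folio extends beyond it */
        if (address < vma->vm_start || address > vma->vm_end)
            address = vma->vm_end;
        return address;
    }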
huge_memory.c
4244 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, in set_pmd_migration_entry() argument
4248 struct vm_area_struct *vma = pvmw->vma; in set_pmd_migration_entry()
4250 unsigned long address = pvmw->address; in set_pmd_migration_entry()
4256 if (!(pvmw->pmd && !pvmw->pte)) in set_pmd_migration_entry()
4260 pmdval = pmdp_invalidate(vma, address, pvmw->pmd); in set_pmd_migration_entry()
4265 set_pmd_at(mm, address, pvmw->pmd, pmdval); in set_pmd_migration_entry()
4286 set_pmd_at(mm, address, pvmw->pmd, pmdswp); in set_pmd_migration_entry()
4294 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) in remove_migration_pmd() argument
4297 struct vm_area_struct *vma = pvmw->vma; in remove_migration_pmd()
4299 unsigned long address = pvmw->address; in remove_migration_pmd()
[all …]
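set_pmd_migration_entry() and remove_migration_pmd() are the THP counterparts of the PTE-level migration helpers: the first freezes a huge PMD by replacing it with a PMD-sized migration swap entry, the second restores it once the folio has moved. A condensed, read-only sketch of the freeze step; the real code also propagates dirty/young/soft-dirty bits, handles anon-exclusive pages, and makes the entry writable when the PMD was:

    static void freeze_huge_pmd(struct page_vma_mapped_walk *pvmw,
                                struct page *page)
    {
        struct vm_area_struct *vma = pvmw->vma;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address = pvmw->address;
        swp_entry_t entry;
        pmd_t pmdswp;

        /* only huge-PMD walk positions qualify (cf. line 4256 above) */
        if (!(pvmw->pmd && !pvmw->pte))
            return;

        flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
        pmdp_invalidate(vma, address, pvmw->pmd);

        /* install a migration entry so faulting threads wait */
        entry = make_readable_migration_entry(page_to_pfn(page));
        pmdswp = swp_entry_to_pmd(entry);
        set_pmd_at(mm, address, pvmw->pmd, pmdswp);

        folio_remove_rmap_pmd(page_folio(page), page, vma);
        folio_put(page_folio(page));
    }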
vmscan.c
4147 bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw) in lru_gen_look_around() argument
4156 pte_t *pte = pvmw->pte; in lru_gen_look_around()
4157 unsigned long addr = pvmw->address; in lru_gen_look_around()
4158 struct vm_area_struct *vma = pvmw->vma; in lru_gen_look_around()
4159 struct folio *folio = pfn_folio(pvmw->pfn); in lru_gen_look_around()
4167 lockdep_assert_held(pvmw->ptl); in lru_gen_look_around()
4173 if (spin_is_contended(pvmw->ptl)) in lru_gen_look_around()
4238 update_bloom_filter(mm_state, max_seq, pvmw->pmd); in lru_gen_look_around()
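lru_gen_look_around() is a consumer rather than a walker: MGLRU's folio_referenced path hands it a live walk position, still holding pvmw->ptl (hence the lockdep assert at 4156/4167), and it opportunistically scans neighbouring PTEs for more young pages, bailing out early if the lock is contended. A hedged sketch of such a call site, loosely modelled on folio_referenced_one() in mm/rmap.c:

    static bool referenced_one(struct folio *folio, struct vm_area_struct *vma,
                               unsigned long addr, void *arg)
    {
        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
        int referenced = 0;

        while (page_vma_mapped_walk(&pvmw)) {
            /* pvmw.ptl is held here, as lru_gen_look_around() asserts */
            if (lru_gen_enabled() && pvmw.pte &&
                pte_young(ptep_get(pvmw.pte))) {
                lru_gen_look_around(&pvmw);
                referenced++;
            }
        }

        *(int *)arg += referenced;    /* accumulate across VMAs */
        return true;
    }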
/linux/kernel/events/
uprobes.c
173 DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0); in __replace_page()
192 if (!page_vma_mapped_walk(&pvmw)) in __replace_page()
194 VM_BUG_ON_PAGE(addr != pvmw.address, old_page); in __replace_page()
209 flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte))); in __replace_page()
210 ptep_clear_flush(vma, addr, pvmw.pte); in __replace_page()
212 set_pte_at(mm, addr, pvmw.pte, in __replace_page()
218 page_vma_mapped_walk_done(&pvmw); in __replace_page()
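__replace_page() shows the single-shot form of the walk: the uprobe code knows old_folio is mapped exactly once at addr, so a single page_vma_mapped_walk() step locates the PTE, the breakpoint page is swapped in, and page_vma_mapped_walk_done() releases the PTE lock. A trimmed sketch, with the real function's rmap, LRU and mmu_notifier bookkeeping left out:

    static int replace_page_pte(struct vm_area_struct *vma,
                                struct folio *old_folio,
                                struct page *new_page, unsigned long addr)
    {
        struct mm_struct *mm = vma->vm_mm;
        DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);

        if (!page_vma_mapped_walk(&pvmw))
            return -EAGAIN;    /* mapping raced away; caller retries */
        VM_BUG_ON_FOLIO(addr != pvmw.address, old_folio);

        /* rip out the old PTE, then install one for the new page */
        flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
        ptep_clear_flush(vma, addr, pvmw.pte);
        set_pte_at(mm, addr, pvmw.pte,
                   mk_pte(new_page, vma->vm_page_prot));

        page_vma_mapped_walk_done(&pvmw);    /* drops pvmw.ptl */
        return 0;
    }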