/linux/mm/
mprotect.c
    121  pte_t *ptep, pte_t oldpte, pte_t ptent, int nr_ptes,  in prot_commit_flush_ptes() argument
    131  ptent = pte_advance_pfn(ptent, idx);  in prot_commit_flush_ptes()
    134  ptent = pte_mkwrite(ptent, vma);  in prot_commit_flush_ptes()
    136  modify_prot_commit_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes);  in prot_commit_flush_ptes()
    137  if (pte_needs_flush(oldpte, ptent))  in prot_commit_flush_ptes()
    174  pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb)  in commit_anon_folio_batch() argument
    184  prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, len,  in commit_anon_folio_batch()
    193  pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb)  in set_write_prot_commit_flush_ptes() argument
    198  set_write = can_change_shared_pte_writable(vma, ptent);  in set_write_prot_commit_flush_ptes()
    199  prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes,  in set_write_prot_commit_flush_ptes()
    [all …]
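
The mprotect.c hits trace a commit-and-flush helper for a batch of PTEs: advance the template PTE to the right PFN within the batch, optionally restore write permission, commit the new protection, and flush the TLB only when the old/new pair requires it. Below is a minimal sketch of that shape, assuming the batched helpers shown in the hits (modify_prot_commit_ptes(), pte_needs_flush()) and the generic tlb_flush_pte_range(); it is illustrative only, not the kernel's exact code.

```c
#include <linux/mm.h>
#include <linux/pgtable.h>
#include <asm/tlb.h>

/*
 * Illustrative sketch: commit an updated protection for @nr_ptes
 * consecutive PTEs starting at @idx within a batch, and flush the TLB
 * only when the old->new transition needs it.  Helper names follow the
 * mprotect.c hits above; exact signatures may differ per tree.
 */
static void sketch_commit_flush_ptes(struct vm_area_struct *vma, unsigned long addr,
				     pte_t *ptep, pte_t oldpte, pte_t ptent,
				     int nr_ptes, int idx, bool set_write,
				     struct mmu_gather *tlb)
{
	if (idx) {
		/* Point the template PTEs at the right PFN within the batch. */
		ptent = pte_advance_pfn(ptent, idx);
		oldpte = pte_advance_pfn(oldpte, idx);
		ptep += idx;
		addr += idx * PAGE_SIZE;
	}

	if (set_write)
		ptent = pte_mkwrite(ptent, vma);

	modify_prot_commit_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes);
	if (pte_needs_flush(oldpte, ptent))
		tlb_flush_pte_range(tlb, addr, (unsigned long)nr_ptes * PAGE_SIZE);
}
```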
|
page_vma_mapped.c
    20   pte_t ptent;  in map_pte() local
    44   ptent = ptep_get(pvmw->pte);  in map_pte()
    46   if (pte_none(ptent)) {  in map_pte()
    48   } else if (pte_present(ptent)) {  in map_pte()
    70   entry = softleaf_from_pte(ptent);  in map_pte()
    110  pte_t ptent = ptep_get(pvmw->pte);  in check_pte() local
    113  const softleaf_t entry = softleaf_from_pte(ptent);  in check_pte()
    119  } else if (pte_present(ptent)) {  in check_pte()
    120  pfn = pte_pfn(ptent);  in check_pte()
    122  const softleaf_t entry = softleaf_from_pte(ptent);  in check_pte()
|
mapping_dirty_helpers.c
    38   pte_t ptent = ptep_get(pte);  in wp_pte() local
    40   if (pte_write(ptent)) {  in wp_pte()
    43   ptent = pte_wrprotect(old_pte);  in wp_pte()
    44   ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);  in wp_pte()
    94   pte_t ptent = ptep_get(pte);  in clean_record_pte() local
    96   if (pte_dirty(ptent)) {  in clean_record_pte()
    101  ptent = pte_mkclean(old_pte);  in clean_record_pte()
    102  ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);  in clean_record_pte()
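
The wp_pte()/clean_record_pte() hits show the standard way to modify a live PTE's protection: read it, and if it is writable (or dirty), transition it through ptep_modify_prot_start()/ptep_modify_prot_commit() so the update cannot race with hardware accessed/dirty bit writes. A minimal sketch of the write-protect half, written as a page-walk pte_entry callback; the real helpers also count touched PTEs and track the range to flush.

```c
#include <linux/mm.h>
#include <linux/pagewalk.h>

/* Sketch: write-protect one present, writable PTE during a page walk. */
static int sketch_wrprotect_pte(pte_t *pte, unsigned long addr,
				unsigned long next, struct mm_walk *walk)
{
	pte_t ptent = ptep_get(pte);

	if (pte_write(ptent)) {
		/* Start/commit pair keeps the update atomic w.r.t. A/D bits. */
		pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

		ptent = pte_wrprotect(old_pte);
		ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);
		/* The caller is expected to flush the TLB for the range. */
	}
	return 0;
}
```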
|
madvise.c
    363  pte_t *start_pte, *pte, ptent;  in madvise_cold_or_pageout_pte_range() local
    459  ptent = ptep_get(pte);  in madvise_cold_or_pageout_pte_range()
    471  if (pte_none(ptent))  in madvise_cold_or_pageout_pte_range()
    474  if (!pte_present(ptent))  in madvise_cold_or_pageout_pte_range()
    477  folio = vm_normal_folio(vma, addr, ptent);  in madvise_cold_or_pageout_pte_range()
    489  nr = madvise_folio_pte_batch(addr, end, folio, pte, &ptent);  in madvise_cold_or_pageout_pte_range()
    531  if (!pageout && pte_young(ptent)) {  in madvise_cold_or_pageout_pte_range()
    662  pte_t *start_pte, *pte, ptent;  in madvise_free_pte_range() local
    681  ptent = ptep_get(pte);  in madvise_free_pte_range()
    683  if (pte_none(ptent))  in madvise_free_pte_range()
    [all …]
|
mlock.c
    311  pte_t ptent = ptep_get(pte);  in folio_mlock_step() local
    316  return folio_pte_batch(folio, pte, ptent, count);  in folio_mlock_step()
    357  pte_t ptent;  in mlock_pte_range() local
    385  ptent = ptep_get(pte);  in mlock_pte_range()
    386  if (!pte_present(ptent))  in mlock_pte_range()
    388  folio = vm_normal_folio(vma, addr, ptent);  in mlock_pte_range()
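
folio_mlock_step() and several other hits (madvise.c, khugepaged.c, mempolicy.c) share one batching idiom: map a present PTE back to its folio, then ask how many of the following PTEs map consecutive pages of that same folio so they can be handled in one step. A sketch of that idiom, assuming the 4-argument folio_pte_batch() shown in the hits; the bounds handling is simplified for illustration.

```c
#include <linux/mm.h>

/*
 * Sketch: return how many PTEs starting at @pte can be processed as one
 * batch because they map consecutive pages of the same large folio.
 */
static unsigned int sketch_pte_batch(struct vm_area_struct *vma, unsigned long addr,
				     pte_t *pte, unsigned long end)
{
	unsigned int max_nr = (end - addr) >> PAGE_SHIFT;
	pte_t ptent = ptep_get(pte);
	struct folio *folio;

	if (!pte_present(ptent))
		return 1;	/* nothing to batch */

	folio = vm_normal_folio(vma, addr, ptent);
	if (!folio || !folio_test_large(folio))
		return 1;	/* small folio: a single PTE */

	return folio_pte_batch(folio, pte, ptent, max_nr);
}
```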
|
memory.c
    1217  pte_t ptent;  in copy_pte_range() local
    1274  ptent = ptep_get(src_pte);  in copy_pte_range()
    1275  if (pte_none(ptent)) {  in copy_pte_range()
    1279  if (unlikely(!pte_present(ptent))) {  in copy_pte_range()
    1293  ptent = ptep_get(src_pte);  in copy_pte_range()
    1294  VM_WARN_ON_ONCE(!pte_present(ptent));  in copy_pte_range()
    1305  ptent, addr, max_nr, rss, &prealloc);  in copy_pte_range()
    1618  struct page *page, pte_t *pte, pte_t ptent, unsigned int nr,  in zap_present_folio_ptes() argument
    1626  ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);  in zap_present_folio_ptes()
    1627  if (pte_dirty(ptent)) {  in zap_present_folio_ptes()
    [all …]
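
The zap_present_folio_ptes() hits show the unmap-side counterpart of PTE batching: clear a run of present PTEs in one call and let the returned PTE value carry the accumulated dirty/young state, which is then propagated to the folio before its mappings are forgotten. A minimal sketch under that assumption; the real zap path also handles rmap removal, uffd-wp markers, delayed flushes and RSS accounting.

```c
#include <linux/mm.h>
#include <linux/swap.h>
#include <asm/tlb.h>

/* Sketch: clear @nr present PTEs and hand their A/D state to the folio. */
static void sketch_zap_folio_ptes(struct mmu_gather *tlb, struct vm_area_struct *vma,
				  struct folio *folio, unsigned long addr,
				  pte_t *pte, unsigned int nr)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t ptent;

	/* Returned PTE carries the OR of dirty/young across the whole batch. */
	ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm);
	tlb_remove_tlb_entries(tlb, pte, nr, addr);

	if (!folio_test_anon(folio)) {
		if (pte_dirty(ptent))
			folio_mark_dirty(folio);
		if (pte_young(ptent))
			folio_mark_accessed(folio);
	}
}
```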
|
highmem.c
    205  pte_t ptent;  in flush_all_zero_pkmaps() local
    218  ptent = ptep_get(&pkmap_page_table[i]);  in flush_all_zero_pkmaps()
    219  BUG_ON(pte_none(ptent));  in flush_all_zero_pkmaps()
    228  page = pte_page(ptent);  in flush_all_zero_pkmaps()
|
khugepaged.c
    1565  pte_t ptent = ptep_get(pte);  in collapse_pte_mapped_thp() local
    1568  if (pte_none(ptent))  in collapse_pte_mapped_thp()
    1572  if (!pte_present(ptent)) {  in collapse_pte_mapped_thp()
    1577  page = vm_normal_page(vma, addr, ptent);  in collapse_pte_mapped_thp()
    1622  pte_t ptent = ptep_get(pte);  in collapse_pte_mapped_thp() local
    1626  if (pte_none(ptent))  in collapse_pte_mapped_thp()
    1634  if (!pte_present(ptent)) {  in collapse_pte_mapped_thp()
    1638  page = vm_normal_page(vma, addr, ptent);  in collapse_pte_mapped_thp()
    1643  nr_batch_ptes = folio_pte_batch(folio, pte, ptent, max_nr_batch_ptes);  in collapse_pte_mapped_thp()
|
mempolicy.c
    687  pte_t ptent;  in queue_folios_pte_range() local
    706  ptent = ptep_get(pte);  in queue_folios_pte_range()
    707  if (pte_none(ptent))  in queue_folios_pte_range()
    709  if (!pte_present(ptent)) {  in queue_folios_pte_range()
    710  const softleaf_t entry = softleaf_from_pte(ptent);  in queue_folios_pte_range()
    716  folio = vm_normal_folio(vma, addr, ptent);  in queue_folios_pte_range()
    720  nr = folio_pte_batch(folio, pte, ptent, max_nr);  in queue_folios_pte_range()
|
vmalloc.c
    365   pte_t ptent;  in vunmap_pte_range() local
    379   ptent = huge_ptep_get_and_clear(&init_mm, addr, pte, size);  in vunmap_pte_range()
    384   ptent = ptep_get_and_clear(&init_mm, addr, pte);  in vunmap_pte_range()
    385   WARN_ON(!pte_none(ptent) && !pte_present(ptent));  in vunmap_pte_range()
    3572  pte_t ptent;  in vmap_pfn_apply() local
    3577  ptent = pte_mkspecial(pfn_pte(pfn, data->prot));  in vmap_pfn_apply()
    3578  set_pte_at(&init_mm, addr, pte, ptent);  in vmap_pfn_apply()
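
The vmap_pfn_apply() hits illustrate installing a mapping for a raw PFN in the kernel page table: build a "special" PTE (no struct page behind it) from the PFN and protection bits, then write it into the slot handed to an apply_to_page_range() callback. A sketch of that idiom; the data-structure layout here is a guess made up for illustration, only the PTE construction mirrors the hits.

```c
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical per-call state; the real vmap_pfn_data differs. */
struct sketch_pfn_data {
	unsigned long *pfns;	/* PFNs to map, one per PTE slot */
	unsigned long idx;
	pgprot_t prot;
};

/* apply_to_page_range() callback: install one special PTE per slot. */
static int sketch_vmap_pfn_apply(pte_t *pte, unsigned long addr, void *data)
{
	struct sketch_pfn_data *d = data;
	pte_t ptent;

	/* pte_mkspecial(): the mapping has no struct page to refcount. */
	ptent = pte_mkspecial(pfn_pte(d->pfns[d->idx++], d->prot));
	set_pte_at(&init_mm, addr, pte, ptent);
	return 0;
}
```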
|
memory-failure.c
    346  pte_t ptent;  in dev_pagemap_mapping_shift() local
    368  ptent = ptep_get(pte);  in dev_pagemap_mapping_shift()
    369  if (pte_present(ptent))  in dev_pagemap_mapping_shift()
|
vmscan.c
    3524  pte_t ptent = ptep_get(pte + i);  in walk_pte_range() local
    3529  pfn = get_pte_pfn(ptent, args->vma, addr, pgdat);  in walk_pte_range()
    3547  if (pte_dirty(ptent))  in walk_pte_range()
    4253  pte_t ptent = ptep_get(pte + i);  in lru_gen_look_around() local
    4255  pfn = get_pte_pfn(ptent, vma, addr, pgdat);  in lru_gen_look_around()
    4273  if (pte_dirty(ptent))  in lru_gen_look_around()
|
swapfile.c
    2263  pte_t ptent;  in unuse_pte_range() local
    2271  ptent = ptep_get_lockless(pte);  in unuse_pte_range()
    2272  entry = softleaf_from_pte(ptent);  in unuse_pte_range()
|
/linux/fs/proc/
task_mmu.c
    1013  pte_t ptent = ptep_get(pte);  in smaps_pte_entry() local
    1015  if (pte_present(ptent)) {  in smaps_pte_entry()
    1016  page = vm_normal_page(vma, addr, ptent);  in smaps_pte_entry()
    1017  young = pte_young(ptent);  in smaps_pte_entry()
    1018  dirty = pte_dirty(ptent);  in smaps_pte_entry()
    1020  } else if (pte_none(ptent)) {  in smaps_pte_entry()
    1023  const softleaf_t entry = softleaf_from_pte(ptent);  in smaps_pte_entry()
    1228  pte_t ptent;  in smaps_hugetlb_range() local
    1231  ptent = huge_ptep_get(walk->mm, addr, pte);  in smaps_hugetlb_range()
    1232  if (pte_present(ptent)) {  in smaps_hugetlb_range()
    [all …]
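
The smaps_pte_entry() hits show the three-way classification that most PTE walkers in this listing perform: a PTE is present (resolve the backing page and read its accessed/dirty bits), none (a hole), or a non-present software entry such as a swap, migration or marker entry, decoded here via softleaf_from_pte() as in the hits. A sketch of that skeleton only; the accounting done by the real smaps code is omitted, and the softleaf handling is left as a stub.

```c
#include <linux/mm.h>

/* Sketch: classify one PTE the way smaps_pte_entry() does. */
static void sketch_classify_pte(struct vm_area_struct *vma, unsigned long addr,
				pte_t *pte)
{
	pte_t ptent = ptep_get(pte);

	if (pte_present(ptent)) {
		struct page *page = vm_normal_page(vma, addr, ptent);
		bool young = pte_young(ptent);
		bool dirty = pte_dirty(ptent);

		/* ... account @page using @young / @dirty ... */
		(void)page; (void)young; (void)dirty;
	} else if (pte_none(ptent)) {
		/* hole: nothing mapped at this address */
	} else {
		/* non-present software entry (swap/migration/marker) */
		const softleaf_t entry = softleaf_from_pte(ptent);

		(void)entry;	/* ... account the swap/migration entry ... */
	}
}
```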
|
/linux/mm/damon/
vaddr.c
    435  pte_t ptent;  in damon_young_pmd_entry() local
    464  ptent = ptep_get(pte);  in damon_young_pmd_entry()
    465  if (!pte_present(ptent))  in damon_young_pmd_entry()
    467  folio = vm_normal_folio(walk->vma, addr, ptent);  in damon_young_pmd_entry()
    470  if (pte_young(ptent) || !folio_test_idle(folio) ||  in damon_young_pmd_entry()
    709  pte_t *start_pte, *pte, ptent;  in damos_va_migrate_pmd_entry() local
    738  ptent = ptep_get(pte);  in damos_va_migrate_pmd_entry()
    740  if (pte_none(ptent) || !pte_present(ptent))  in damos_va_migrate_pmd_entry()
    742  folio = vm_normal_folio(walk->vma, addr, ptent);  in damos_va_migrate_pmd_entry()
    873  pte_t *start_pte, *pte, ptent;  in damos_va_stat_pmd_entry() local
    [all …]
|
/linux/mm/kasan/
init.c
    348  pte_t ptent;  in kasan_remove_pte_table() local
    355  ptent = ptep_get(pte);  in kasan_remove_pte_table()
    357  if (!pte_present(ptent))  in kasan_remove_pte_table()
    360  if (WARN_ON(!kasan_early_shadow_page_entry(ptent)))  in kasan_remove_pte_table()
|
/linux/fs/
userfaultfd.c
    294  pte_t ptent;  in userfaultfd_must_wait() local
    333  ptent = ptep_get(pte);  in userfaultfd_must_wait()
    337  if (pte_none(ptent))  in userfaultfd_must_wait()
    340  if (pte_is_uffd_marker(ptent))  in userfaultfd_must_wait()
    346  if (!pte_write(ptent) && (reason & VM_UFFD_WP))  in userfaultfd_must_wait()
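
The userfaultfd_must_wait() hits reduce to a simple decision on the PTE value: the faulting thread must wait if the PTE is empty, carries a uffd marker, or is write-protected while the fault was a VM_UFFD_WP one. A sketch of just that decision, assuming the pte_is_uffd_marker() helper shown in the hits; the locking and the huge-page paths of the real function are omitted.

```c
#include <linux/mm.h>
#include <linux/userfaultfd_k.h>

/* Sketch: should a uffd fault on this PTE block until userspace resolves it? */
static bool sketch_uffd_must_wait(pte_t *pte, unsigned long reason)
{
	pte_t ptent = ptep_get(pte);

	if (pte_none(ptent))
		return true;			/* missing page */
	if (pte_is_uffd_marker(ptent))
		return true;			/* marker left for userspace */
	if (!pte_write(ptent) && (reason & VM_UFFD_WP))
		return true;			/* write-protect fault */
	return false;
}
```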
|