/linux/arch/powerpc/mm/book3s64/
hash_tlb.c, in flush_hash_table_pmd_range():
    227  pte_t *start_pte;    (local)
    241  start_pte = pte_offset_map(pmd, addr);
    242  if (!start_pte)
    244  for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
    250  pte_unmap(start_pte);
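
This is the plain, lockless form of the pattern: map the PTE page once, walk all PTRS_PER_PTE slots, and unmap through the saved start_pte. Since pte_offset_map() can return NULL when the PMD is no longer stable, the check at line 242 is mandatory. A minimal sketch of the same shape; walk_one_pte_page() and the per-PTE work are hypothetical, and the caller is assumed to hold whatever higher-level locks the walk requires:

    #include <linux/mm.h>

    /* Hypothetical walker in the flush_hash_table_pmd_range() shape. */
    static void walk_one_pte_page(pmd_t *pmd, unsigned long addr)
    {
            pte_t *start_pte, *pte;

            start_pte = pte_offset_map(pmd, addr);
            if (!start_pte)         /* PTE page went away under us */
                    return;
            for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
                    pte_t ptent = ptep_get(pte);

                    if (pte_none(ptent))
                            continue;
                    /* ... per-PTE work goes here ... */
            }
            pte_unmap(start_pte);   /* always unmap via the saved start */
    }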

/linux/mm/
madvise.c, in madvise_cold_or_pageout_pte_range():
    353  pte_t *start_pte, *pte, ptent;
    442  start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
    443  if (!start_pte)
    455  pte_unmap_unlock(start_pte, ptl);
    497  pte_unmap_unlock(start_pte, ptl);
    498  start_pte = NULL;
    502  start_pte = pte =
    504  if (!start_pte)
    553  if (start_pte) {
    555  pte_unmap_unlock(start_pte, ptl);
    345  pte_t *start_pte, *pte, ptent;    (local)
in madvise_free_pte_range():
    651  pte_t *start_pte, *pte, ptent;    (local)
    [all...]
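
The hits at lines 497-504 show the drop-and-retake idiom: the PTE lock is released mid-walk (for example to split a large folio or to sleep), start_pte is set to NULL so the exit path knows nothing is mapped, and the walk then remaps from the current address. A sketch of that idiom under assumed names; walk_with_lock_breaks() and the per-PTE work are placeholders, not kernel APIs:

    #include <linux/mm.h>
    #include <linux/sched.h>

    static int walk_with_lock_breaks(struct vm_area_struct *vma, pmd_t *pmd,
                                     unsigned long addr, unsigned long end)
    {
            pte_t *start_pte, *pte;
            spinlock_t *ptl;

            start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
            if (!start_pte)
                    return 0;
            for (; addr < end; pte++, addr += PAGE_SIZE) {
                    if (need_resched()) {
                            /* Drop the lock; NULL marks "nothing mapped". */
                            pte_unmap_unlock(start_pte, ptl);
                            start_pte = NULL;
                            cond_resched();
                            start_pte = pte = pte_offset_map_lock(vma->vm_mm,
                                                            pmd, addr, &ptl);
                            if (!start_pte)
                                    break;  /* PTE page vanished meanwhile */
                    }
                    /* ... process ptep_get(pte) for this address ... */
            }
            if (start_pte)
                    pte_unmap_unlock(start_pte, ptl);
            return 0;
    }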

mlock.c, in mlock_pte_range():
    358  pte_t *start_pte, *pte;    (local)
    378  start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
    379  if (!start_pte) {
    384  for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
    405  pte_unmap(start_pte);
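
Worth noting here: line 405 calls pte_unmap() alone rather than pte_unmap_unlock(), because mlock_pte_range() shares its unlock with an exit path that may hold only the PMD-level lock (the THP case), so the kunmap and the spin_unlock are kept separate. A sketch of that split, with walk_range_once() and the per-page work as hypothetical placeholders:

    #include <linux/mm.h>

    static int walk_range_once(struct vm_area_struct *vma, pmd_t *pmd,
                               unsigned long addr, unsigned long end)
    {
            pte_t *start_pte, *pte;
            spinlock_t *ptl;

            start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
            if (!start_pte)
                    return -EAGAIN; /* PTE page gone; caller may retry */
            for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
                    pte_t ptent = ptep_get(pte);

                    if (!pte_present(ptent))
                            continue;
                    /* ... per-page work ... */
            }
            pte_unmap(start_pte);   /* unmap the PTE page ...            */
            spin_unlock(ptl);       /* ... then drop the lock separately */
            return 0;
    }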

khugepaged.c, in collapse_pte_mapped_thp():
    1506  pte_t *start_pte, *pte;    (local)
    1563  start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
    1564  if (!start_pte)    /* mmap_lock + page lock should prevent this */
    1568  for (i = 0, addr = haddr, pte = start_pte;
    1594  pte_unmap_unlock(start_pte, ptl);
    1611  start_pte = pte_offset_map_rw_nolock(mm, pmd, haddr, &pgt_pmd, &ptl);
    1612  if (!start_pte)    /* mmap_lock + page lock should prevent this */
    1623  for (i = 0, addr = haddr, pte = start_pte;
    1676  pte_unmap_unlock(start_pte, ptl);
    1699  if (start_pte)
    [all …]
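
Line 1611 shows the newer pte_offset_map_rw_nolock() helper: like pte_offset_map_lock() it maps the PTE page and reports the page-table spinlock through its last argument, but it does not take that lock, and it additionally snapshots the pmd value (pgt_pmd above) so the caller can detect a concurrent pmd change before committing writes. A sketch of the resulting two-phase shape; two_phase_walk() is a hypothetical name, and the validate/modify steps are placeholders:

    #include <linux/mm.h>

    static void two_phase_walk(struct mm_struct *mm, pmd_t *pmd,
                               unsigned long haddr)
    {
            pte_t *start_pte, *pte;
            spinlock_t *ptl;
            unsigned long addr;
            pmd_t pmdval;
            int i;

            /* Phase 1: locked, read-only scan of the whole PTE page. */
            start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
            if (!start_pte)
                    return;
            for (i = 0, addr = haddr, pte = start_pte;
                 i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {
                    /* ... validate ptep_get(pte) ... */
            }
            pte_unmap_unlock(start_pte, ptl);

            /* Phase 2: remap with a pmd snapshot; lock taken by hand. */
            start_pte = pte_offset_map_rw_nolock(mm, pmd, haddr,
                                                 &pmdval, &ptl);
            if (!start_pte)
                    return;
            spin_lock(ptl);
            /* ... recheck pmdval against the live pmd, then modify ... */
            pte_unmap_unlock(start_pte, ptl);
    }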

memory.c, in zap_pte_range():
    1599  pte_t *start_pte;    (local)
    1606  start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
    1705  pte_unmap_unlock(start_pte, ptl);
in insert_pages():
    2117  pte_t *start_pte, *pte;    (local)
    2142  start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
    2143  if (!start_pte) {
    2147  for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
    2151  pte_unmap_unlock(start_pte, pte_lock);
    2159  pte_unmap_unlock(start_pte, pte_lock);
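
The insert_pages() hits show the batching variant: the inner loop is bounded by a batch size rather than by an end address, so each lock hold covers at most the remainder of one PTE page. A sketch of how such a batch bound can be computed and used; insert_batch() and the per-entry install step are assumptions, not the kernel's actual helpers:

    #include <linux/mm.h>
    #include <linux/minmax.h>

    static int insert_batch(struct mm_struct *mm, pmd_t *pmd,
                            unsigned long addr, unsigned long nr)
    {
            /* Slots left in this PTE page, so the batch never crosses it. */
            unsigned long remaining = PTRS_PER_PTE -
                    ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
            unsigned long batch_size = min(nr, remaining);
            pte_t *start_pte, *pte;
            spinlock_t *pte_lock;
            unsigned long pte_idx = 0;

            start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
            if (!start_pte)
                    return -EFAULT;
            for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
                    /* ... install one entry; unlock and bail on error ... */
            }
            pte_unmap_unlock(start_pte, pte_lock);
            return 0;
    }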

/linux/arch/parisc/mm/
init.c, in map_pages():
    351  unsigned long start_pte;    (local)
    369  start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
    398  for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
    433  start_pte = 0;
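
Unlike every other hit in this list, start_pte here is an index, not a pointer: line 369 masks the page frame number of the start address down to a slot number within the first PTE page, and line 433 resets it to 0 so every later PTE page is walked from its first slot. A worked example, assuming 4 KiB pages and 512-entry PTE tables (both are configuration-dependent):

    #include <linux/mm.h>

    /*
     * For start_vaddr = 0x40003000 with PAGE_SHIFT = 12, PTRS_PER_PTE = 512:
     *   (0x40003000 >> 12) & (512 - 1) == 0x40003 & 0x1ff == 3
     * so the first PTE page is walked from slot 3 through 511.
     */
    static unsigned long first_pte_slot(unsigned long start_vaddr)
    {
            return (start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
    }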

/linux/fs/proc/
task_mmu.c, in pagemap_scan_pmd_entry():
    2454  pte_t *pte, *start_pte;    (local)
    2467  start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
    2542  pte_unmap_unlock(start_pte, ptl);
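
This is the simplest locked form: one pte_offset_map_lock()/pte_unmap_unlock() bracket around a read-only pass, with results accumulated under the lock. A minimal sketch in that shape; scan_present() and its counting are illustrative, not the pagemap code's actual logic:

    #include <linux/mm.h>

    static unsigned long scan_present(struct vm_area_struct *vma, pmd_t *pmd,
                                      unsigned long start, unsigned long end)
    {
            unsigned long present = 0;
            pte_t *pte, *start_pte;
            spinlock_t *ptl;

            start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd,
                                                  start, &ptl);
            if (!start_pte)
                    return 0;
            for (; start != end; pte++, start += PAGE_SIZE) {
                    if (pte_present(ptep_get(pte)))
                            present++;
            }
            pte_unmap_unlock(start_pte, ptl);
            return present;
    }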