/linux/arch/arm/mm/

    copypage-v6.c
         91: set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));  in v6_copy_user_highpage_aliasing()
         92: set_top_pte(kto, mk_pte(to, PAGE_KERNEL));  in v6_copy_user_highpage_aliasing()
        117: set_top_pte(to, mk_pte(page, PAGE_KERNEL));  in v6_clear_user_highpage_aliasing()

    copypage-xscale.c
         95: set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));  in xscale_mc_copy_user_highpage()

    copypage-v4mc.c
         75: set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));  in v4_mc_copy_user_highpage()

    dma-mapping.c
        315: set_pte_ext(pte, mk_pte(page, prot), 0);  in __dma_update_pte()
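The copypage hits above share one aliasing-cache idiom: before copying, each page is mapped at a fixed kernel address whose cache colour matches the user alias, via a PTE built by mk_pte(). A minimal sketch of that idiom, modelled on copypage-v6.c and assuming ARM's private set_top_pte() helper from arch/arm/mm/mm.h; the real code also takes a raw spinlock and derives the per-colour addresses from the user virtual address:

    #include <linux/mm.h>
    #include <linux/highmem.h>
    #include "mm.h"	/* arch/arm/mm/mm.h: set_top_pte() */

    /* Sketch only: kfrom/kto are the pre-computed, colour-matched
     * scratch addresses the real function derives itself. */
    static void copy_highpage_aliasing_sketch(struct page *to, struct page *from,
    					  unsigned long kfrom, unsigned long kto)
    {
    	set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));	/* map source page */
    	set_top_pte(kto, mk_pte(to, PAGE_KERNEL));	/* map destination page */
    	copy_page((void *)kto, (void *)kfrom);		/* copy via the aliases */
    }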
|
/linux/arch/xtensa/mm/

    kasan_init.c
         26: mk_pte(virt_to_page(kasan_early_shadow_page),  in kasan_early_init()
         89: mk_pte(virt_to_page(kasan_early_shadow_page),  in kasan_init()
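Both kasan_init.c hits (and the riscv entry further down) use the same early-boot trick: every shadow PTE is pointed at the single shared kasan_early_shadow_page, so the whole address space reads as unpoisoned until real shadow memory is allocated. A sketch of that loop; PAGE_KERNEL_RO is an assumption here, since the exact pgprot is per-architecture:

    #include <linux/mm.h>
    #include <linux/kasan.h>	/* kasan_early_shadow_page */

    static void __init kasan_populate_early_shadow_sketch(pte_t *shadow_pte)
    {
    	int i;

    	/* Every slot maps the same zeroed page: all reads see "no poison". */
    	for (i = 0; i < PTRS_PER_PTE; i++)
    		set_pte(&shadow_pte[i],
    			mk_pte(virt_to_page(kasan_early_shadow_page),
    			       PAGE_KERNEL_RO));
    }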
|
/linux/arch/sh/mm/

    kmap.c
         46: set_pte(kmap_coherent_pte - idx, mk_pte(page, PAGE_KERNEL));  in kmap_coherent()
|
/linux/mm/

    hugetlb_vmemmap.c
         70: entry = mk_pte(head + i, pgprot);  in vmemmap_split_pmd()
        232: entry = mk_pte(walk->reuse_page, pgprot);  in vmemmap_remap_pte()
        276: set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));  in vmemmap_restore_pte()
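The hugetlb_vmemmap hits show mk_pte() on both sides of the optimisation: vmemmap_split_pmd() pre-fills a fresh PTE table so each entry maps one page of the formerly PMD-mapped range, and vmemmap_remap_pte() later redirects entries at the shared reuse page. A sketch of the split-side fill loop; function and parameter names here are illustrative, not the kernel's:

    #include <linux/mm.h>

    static void vmemmap_fill_pte_table_sketch(struct page *head, pte_t *pgtable,
    					  unsigned long addr, pgprot_t pgprot)
    {
    	int i;

    	/* One PTE per page of the huge mapping being split. */
    	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
    		set_pte_at(&init_mm, addr, pgtable + i,
    			   mk_pte(head + i, pgprot));
    }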
|
    highmem.c
        291: &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));  in map_new_virtual()

    memory.c
        876: pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));  in restore_exclusive_pte()
       2295: pteval = mk_pte(page, prot);  in insert_page_into_pte_locked()
       2300: pteval = mk_pte(page, prot);  in insert_page_into_pte_locked()
       4961: pte = mk_pte(page, vma->vm_page_prot);  in do_swap_page()
       5462: entry = mk_pte(page, vma->vm_page_prot);  in set_pte_range()
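The memory.c hits are variations of one fault-path pattern: derive the PTE from the VMA's vm_page_prot, tweak its state bits, then install it. A minimal sketch in the style of restore_exclusive_pte(), leaving out the real paths' rmap accounting, locking and writability handling:

    #include <linux/mm.h>
    #include <linux/pgtable.h>

    static void install_vma_pte_sketch(struct vm_area_struct *vma,
    				   unsigned long addr, pte_t *ptep,
    				   struct page *page)
    {
    	/* READ_ONCE matches the hits: vm_page_prot may change concurrently. */
    	pte_t entry = mk_pte(page, READ_ONCE(vma->vm_page_prot));

    	entry = pte_mkold(entry);		/* start "old"; hardware marks it young */
    	set_pte_at(vma->vm_mm, addr, ptep, entry);
    	update_mmu_cache(vma, addr, ptep);	/* arch hook after a PTE update */
    }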
|
    migrate_device.c
       1049: entry = mk_pte(page, vma->vm_page_prot);  in migrate_vma_insert_page()

    userfaultfd.c
        183: _dst_pte = mk_pte(page, dst_vma->vm_page_prot);  in mfill_atomic_install_pte()

    migrate.c
        380: pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));  in remove_migration_pte()

    ksm.c
       1425: newpte = mk_pte(kpage, vma->vm_page_prot);  in replace_page()

    swapfile.c
       2231: new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));  in unuse_pte()

    vmalloc.c
        554: set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));  in vmap_pages_pte_range()
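vmap_pages_pte_range() is the simplest of the mm/ callers: a single loop that turns an array of pages into kernel mappings. A condensed sketch, with the real function's mapping counter and error paths trimmed:

    #include <linux/mm.h>

    static int vmap_pte_range_sketch(pmd_t *pmd, unsigned long addr,
    				 unsigned long end, pgprot_t prot,
    				 struct page **pages, int *nr)
    {
    	pte_t *pte = pte_alloc_kernel(pmd, addr);	/* allocate table on demand */

    	if (!pte)
    		return -ENOMEM;
    	do {
    		struct page *page = pages[(*nr)++];

    		if (WARN_ON(!pte_none(ptep_get(pte))))
    			return -EBUSY;			/* slot already populated */
    		/* mk_pte() builds the value; set_pte_at() publishes it. */
    		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
    	} while (pte++, addr += PAGE_SIZE, addr != end);
    	return 0;
    }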
|
    huge_memory.c
       3236: entry = mk_pte(page, READ_ONCE(vma->vm_page_prot));  in __split_huge_pmd_locked()
|
/linux/arch/alpha/mm/

    init.c
         57: = pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));  in pgd_alloc()
|
/linux/arch/sparc/mm/

    io-unit.c
        252: set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));  in iounit_alloc()

    iommu.c
        366: set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));  in sbus_iommu_alloc()
|
/linux/arch/riscv/mm/

    kasan_init.c
        527: mk_pte(virt_to_page(kasan_early_shadow_page),  in kasan_init()
|
/linux/arch/x86/kernel/

    alternative.c
       2534: pte = mk_pte(pages[0], pgprot);  in __text_poke()
       2538: pte = mk_pte(pages[1], pgprot);  in __text_poke()
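__text_poke() uses mk_pte() to build a writable alias of otherwise read-only kernel text: the backing page is mapped at a scratch address inside the dedicated poking_mm, written through, then unmapped. In this sketch the temporary-mm switch is reduced to two hypothetical helpers, switch_to_poking_mm() and switch_back_from_poking_mm(); the real switching, locking and cross-page handling live in alternative.c:

    #include <linux/mm.h>
    #include <asm/text-patching.h>	/* poking_mm */
    #include <asm/tlbflush.h>

    static void text_poke_sketch(pte_t *ptep, unsigned long poking_addr,
    			     struct page *page, const void *src, size_t len)
    {
    	/* Writable kernel alias for the read-only text page. */
    	set_pte_at(poking_mm, poking_addr, ptep, mk_pte(page, PAGE_KERNEL));

    	switch_to_poking_mm();			/* hypothetical helper */
    	memcpy((void *)poking_addr, src, len);
    	switch_back_from_poking_mm();		/* hypothetical helper */

    	pte_clear(poking_mm, poking_addr, ptep);
    	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr + PAGE_SIZE,
    			   PAGE_SHIFT, false);
    }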
|
/linux/arch/x86/mm/pat/

    set_memory.c
       1200: __set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));  in __split_large_page()
|
/linux/include/linux/

    mm.h
       2050: static inline pte_t mk_pte(const struct page *page, pgprot_t pgprot)  (definition of mk_pte())
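The listing ends at the definition itself: on current kernels this mm.h helper is the generic one, and it presumably reduces to pfn_pte() on the page's frame number, roughly:

    static inline pte_t mk_pte(const struct page *page, pgprot_t pgprot)
    {
    	/* Translate the page to its frame number, then encode with pgprot. */
    	return pfn_pte(page_to_pfn(page), pgprot);
    }

Note that mk_pte() only constructs the PTE value; every hit above follows the same two-step shape, with set_pte(), set_pte_at(), set_top_pte() or set_pte_ext() doing the actual install.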
|