/linux/include/asm-generic/
pgtable-nop4d.h

    typedef struct { pgd_t pgd; } p4d_t;
    …
    static inline int pgd_none(pgd_t pgd)       { return 0; }
    static inline int pgd_bad(pgd_t pgd)        { return 0; }
    static inline int pgd_present(pgd_t pgd)    { return 1; }
    static inline void pgd_clear(pgd_t *pgd)    { }
    #define p4d_ERROR(p4d)                  (pgd_ERROR((p4d).pgd))
    …
    #define pgd_populate(mm, pgd, p4d)      do { } while (0)
    #define pgd_populate_safe(mm, pgd, p4d) do { } while (0)
    …
    static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
    {
        return (p4d_t *)pgd;
    }
    [all …]
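With only four paging levels, this header folds p4d into pgd: p4d_t is a one-member wrapper, the predicates collapse to constants, and p4d_offset() simply reinterprets the pgd pointer, so generic five-level walk code compiles unchanged. A minimal userspace model of that folding trick (toy types, not kernel code):

    /*
     * Toy model of level folding: p4d_t wraps pgd_t and p4d_offset()
     * reinterprets the pgd pointer, so a five-level walk still
     * compiles and runs on a four-level layout.
     */
    #include <stdio.h>

    typedef struct { unsigned long pgd; } pgd_t;
    typedef struct { pgd_t pgd; } p4d_t;        /* folded: same storage */

    static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
    {
        (void)address;                          /* one p4d per pgd entry */
        return (p4d_t *)pgd;
    }

    int main(void)
    {
        pgd_t entry = { 0x1234 };
        p4d_t *p4d = p4d_offset(&entry, 0xdeadbeef);

        /* The "descent" lands on the very same entry. */
        printf("pgd %#lx -> p4d %#lx\n", entry.pgd, p4d->pgd.pgd);
        return 0;
    }

Since the cast is compile-time only, the folded level costs nothing at runtime.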
|
/linux/arch/x86/mm/
kasan_init_64.c

    static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr, …
        …
        if (pgd_none(*pgd)) {
            …
            pgd_populate(&init_mm, pgd, p);
        …
        p4d = p4d_offset(pgd, addr);

    /* kasan_populate_shadow() */
        pgd_t *pgd;
        …
        pgd = pgd_offset_k(addr);
        …
            kasan_populate_pgd(pgd, addr, next, nid);
        } while (pgd++, addr = next, addr != end);

    /* clear_pgds() */
        pgd_t *pgd;
        …
        pgd = pgd_offset_k(start);
    [all …]
|
pgtable.c

    static inline void pgd_list_add(pgd_t *pgd)
    {
        struct ptdesc *ptdesc = virt_to_ptdesc(pgd);
        …

    static inline void pgd_list_del(pgd_t *pgd)
    {
        struct ptdesc *ptdesc = virt_to_ptdesc(pgd);
        …

    static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
    {
        virt_to_ptdesc(pgd)->pt_mm = mm;
    …

    static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
    {
        …
        clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY, …
        …
        pgd_set_mm(pgd, mm);
        pgd_list_add(pgd);
    [all …]
|
pti.c

    pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
    {
        …
        if (!pgdp_maps_userspace(pgdp) || (pgd.pgd & _PAGE_NOPTISHADOW))
            return pgd;
        …
        kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;
        …
        if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
            …
            pgd.pgd |= _PAGE_NX;
        …
        return pgd;

    /* pti_user_pagetable_walk_p4d() */
        pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
        …
        if (pgd_none(*pgd)) {
            …
            set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
    [all …]
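With PTI, each mm carries a second, user-visible top-level table next to the kernel one; __pti_set_user_pgtbl() mirrors user-space updates into that shadow and returns a kernel-side entry with _PAGE_NX added where it can, and kernel_to_user_pgdp() finds the shadow by pure address arithmetic on the pgd pointer. A toy model of that sibling-table addressing, with an invented page size and a plain XOR of the page-sized bit (a sketch of the idea, not x86's exact layout):

    /*
     * Toy dual-root model: two top-level tables share one order-1
     * allocation, and the user copy is reached by flipping the
     * page-sized address bit of a kernel-copy entry pointer.
     */
    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define TOY_PAGE_SIZE   4096UL
    #define TOY_PTRS        (TOY_PAGE_SIZE / sizeof(uint64_t))

    /* Flip the page-sized address bit to land in the sibling table. */
    static uint64_t *kernel_to_user_root(uint64_t *kernel_entry)
    {
        return (uint64_t *)((uintptr_t)kernel_entry ^ TOY_PAGE_SIZE);
    }

    int main(void)
    {
        /* One block: kernel copy first, user copy one page above. */
        uint64_t *kroot = aligned_alloc(2 * TOY_PAGE_SIZE, 2 * TOY_PAGE_SIZE);
        uint64_t *uroot = kroot + TOY_PTRS;

        assert(kroot);
        kernel_to_user_root(&kroot[5])[0] = 0xabc;  /* mirror an entry */
        assert(uroot[5] == 0xabc);

        free(kroot);
        return 0;
    }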
|
init_64.c

    DEFINE_POPULATE(pgd_populate, pgd, p4d, init)

    /* sync_global_pgds_l5() */
        pgd_t *pgd;
        …
        pgd = (pgd_t *)page_address(page) + pgd_index(addr);
        …
        if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
            BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
        …
        if (pgd_none(*pgd))
            set_pgd(pgd, *pgd_ref);

    /* sync_global_pgds_l4() */
        pgd_t *pgd;
        …
        pgd = (pgd_t *)page_address(page) + pgd_index(addr);
        p4d = p4d_offset(pgd, addr);
    [all …]
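sync_global_pgds_l5() pushes a newly created kernel top-level entry from the init_mm reference table into every other pgd in the system: empty slots are filled from the reference, and already-populated slots are asserted to agree with it. The same copy-if-empty loop as a self-contained sketch with made-up types and sizes:

    /*
     * Toy sketch of the sync_global_pgds() idea: propagate one
     * reference top-level entry into every root whose slot is empty,
     * and check that non-empty slots already match.
     */
    #include <assert.h>
    #include <stddef.h>

    #define TOY_PTRS_PER_PGD 512

    typedef struct { unsigned long val; } toy_pgd_t;

    static void toy_sync_one_slot(toy_pgd_t *ref_root, toy_pgd_t **roots,
                                  size_t nroots, size_t index)
    {
        for (size_t i = 0; i < nroots; i++) {
            toy_pgd_t *pgd = &roots[i][index];

            if (pgd->val == 0)          /* "pgd_none": fill it in */
                pgd->val = ref_root[index].val;
            else                        /* already set: must agree */
                assert(pgd->val == ref_root[index].val);
        }
    }

    int main(void)
    {
        static toy_pgd_t init_root[TOY_PTRS_PER_PGD], a[TOY_PTRS_PER_PGD],
                         b[TOY_PTRS_PER_PGD];
        toy_pgd_t *all[] = { a, b };

        init_root[300].val = 0xfeed;    /* new kernel mapping appears */
        toy_sync_one_slot(init_root, all, 2, 300);
        assert(a[300].val == 0xfeed && b[300].val == 0xfeed);
        return 0;
    }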
|
init_32.c

    static pmd_t * __init one_md_table_init(pgd_t *pgd)
    {
        …
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
            …
            set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
            p4d = p4d_offset(pgd, 0);
        …
        p4d = p4d_offset(pgd, 0);

    /* page_table_range_init() */
        pgd_t *pgd;
        …
        pgd = pgd_base + pgd_idx;
        …
        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
            pmd = one_md_table_init(pgd);

    /* kernel_physical_mapping_init() */
        pgd_t *pgd;
    [all …]
|
fault.c

    static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
    {
        …
        pgd += index;
        pgd_k = init_mm.pgd + index;
        …
        p4d = p4d_offset(pgd, address);

    /* dump_pagetable() */
        pgd_t *pgd = &base[pgd_index(address)];
        …
        pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
        if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
        …
        p4d = p4d_offset(pgd, address);

    /* dump_pagetable(), second variant */
        pgd_t *pgd = base + pgd_index(address);
        …
        if (bad_address(pgd))
    [all …]
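Both dump_pagetable() variants walk down one level at a time, print each raw entry, and stop at the first level that is missing or malformed. A compilable sketch of that early-exit walk over a fake two-level table with an invented present bit:

    /*
     * Toy early-exit dump: print each level's raw entry and bail out
     * as soon as a level is absent, mirroring the real function's
     * "bad:" exits. Table shape and present bit are invented.
     */
    #include <stdio.h>
    #include <stdlib.h>

    #define TOY_ENTRIES 4

    struct toy_pt { unsigned long entry[TOY_ENTRIES]; };

    static void toy_dump(struct toy_pt *root, unsigned int idx)
    {
        unsigned long e = root->entry[idx];

        printf("*pgd = %#lx ", e);
        if (!(e & 1)) {                 /* toy "present" bit */
            printf("(not present)\n");
            return;
        }

        struct toy_pt *next = (struct toy_pt *)(e & ~1UL);

        printf("-> *pte = %#lx\n", next->entry[idx]);
    }

    int main(void)
    {
        struct toy_pt *leaf = calloc(1, sizeof(*leaf));
        struct toy_pt root = { { 0 } };

        if (!leaf)
            return 1;
        leaf->entry[2] = 0xcafe0;
        root.entry[2] = (unsigned long)leaf | 1;

        toy_dump(&root, 2);             /* walks both levels */
        toy_dump(&root, 3);             /* stops at the first level */
        free(leaf);
        return 0;
    }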
|
/linux/arch/powerpc/include/asm/book3s/64/
pgalloc.h

    static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
    {
        …
        free_page((unsigned long)pgd);
        …
        free_pages((unsigned long)pgd, 4);

    /* pgd_alloc() */
        pgd_t *pgd;
        …
        pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), …
        if (unlikely(!pgd))
            return pgd;
        …
        kmemleak_no_scan(pgd);
        …
        memset(pgd, 0, PGD_TABLE_SIZE);
        …
        return pgd;
    [all …]
|
/linux/arch/riscv/kvm/
mmu.c

    gstage.pgd = kvm->arch.pgd;     /* mmu_wp_memory_region() */
    gstage.pgd = kvm->arch.pgd;     /* kvm_riscv_mmu_ioremap() */
    gstage.pgd = kvm->arch.pgd;     /* kvm_riscv_mmu_iounmap() */
    gstage.pgd = kvm->arch.pgd;     /* kvm_arch_mmu_enable_log_dirty_pt_masked() */
    gstage.pgd = kvm->arch.pgd;     /* kvm_arch_flush_shadow_memslot() */

    /* kvm_unmap_gfn_range() */
    if (!kvm->arch.pgd)
        …
    gstage.pgd = kvm->arch.pgd;

    /* kvm_age_gfn() */
    if (!kvm->arch.pgd)
        …
    gstage.pgd = kvm->arch.pgd;

    /* kvm_test_age_gfn() */
    if (!kvm->arch.pgd)
    [all …]
|
/linux/arch/riscv/include/asm/
pgtable-64.h

    static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
    {
        …
        WRITE_ONCE(*pgdp, pgd);
        …
        set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) });
    }

    static inline int pgd_none(pgd_t pgd)
    {
        …
        return (pgd_val(pgd) == 0);
        …

    static inline int pgd_present(pgd_t pgd)
    {
        …
        return (pgd_val(pgd) & _PAGE_PRESENT);
        …

    static inline int pgd_bad(pgd_t pgd)
    {
        …
        return !pgd_present(pgd);
        …

    static inline void pgd_clear(pgd_t *pgd)
    [all …]
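Here the fold is decided at boot rather than at build time: one riscv kernel image supports both four- and five-level paging, so set_pgd() and friends branch on a runtime flag and, when the fifth level is absent, forward the operation to the p4d helpers through a cast. A toy sketch of that pattern; the pgtable_l5_enabled name follows the kernel, the types and values do not:

    /*
     * Toy runtime folding: a boot-time flag decides whether the top
     * level is real or whether its slots are handled one level down.
     */
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { unsigned long val; } toy_pgd_t;
    typedef struct { unsigned long val; } toy_p4d_t;

    static bool pgtable_l5_enabled;         /* set once during boot */

    static void toy_set_p4d(toy_p4d_t *p4dp, toy_p4d_t p4d)
    {
        p4dp->val = p4d.val;
    }

    static void toy_set_pgd(toy_pgd_t *pgdp, toy_pgd_t pgd)
    {
        if (pgtable_l5_enabled)
            pgdp->val = pgd.val;            /* real 5th level exists */
        else                                /* folded: slot is a p4d */
            toy_set_p4d((toy_p4d_t *)pgdp, (toy_p4d_t){ pgd.val });
    }

    int main(void)
    {
        toy_pgd_t slot = { 0 };

        pgtable_l5_enabled = false;         /* pretend sv48 boot */
        toy_set_pgd(&slot, (toy_pgd_t){ 0xbeef });
        printf("entry = %#lx\n", slot.val);
        return 0;
    }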
|
pgalloc.h

    static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
    {
        …
        set_pgd(pgd, __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
        …

    static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, …
    {
        …
        set_pgd_safe(pgd, …

    static inline void sync_kernel_mappings(pgd_t *pgd)
    {
        memcpy(pgd + USER_PTRS_PER_PGD,
               init_mm.pgd + USER_PTRS_PER_PGD,
               …

    /* pgd_alloc() */
        pgd_t *pgd;
        …
        pgd = __pgd_alloc(mm, 0);
        if (likely(pgd != NULL)) {
    [all …]
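sync_kernel_mappings() seeds a freshly allocated pgd with the kernel half of init_mm's table in a single memcpy, so a new address space sees all existing kernel mappings from its first use. The same move in a self-contained toy with invented sizes:

    /*
     * Toy sync_kernel_mappings(): copy the kernel half of the
     * reference root into a new root; the user half stays empty.
     */
    #include <assert.h>
    #include <string.h>

    #define TOY_PTRS_PER_PGD    512
    #define TOY_USER_PTRS       256     /* lower half user, upper kernel */

    typedef struct { unsigned long val; } toy_pgd_t;

    static toy_pgd_t toy_init_pgd[TOY_PTRS_PER_PGD];

    static void toy_sync_kernel_mappings(toy_pgd_t *pgd)
    {
        memcpy(pgd + TOY_USER_PTRS, toy_init_pgd + TOY_USER_PTRS,
               (TOY_PTRS_PER_PGD - TOY_USER_PTRS) * sizeof(toy_pgd_t));
    }

    int main(void)
    {
        static toy_pgd_t new_pgd[TOY_PTRS_PER_PGD];

        toy_init_pgd[400].val = 0xdead;     /* some kernel mapping */
        toy_sync_kernel_mappings(new_pgd);

        assert(new_pgd[400].val == 0xdead); /* kernel half copied */
        assert(new_pgd[10].val == 0);       /* user half untouched */
        return 0;
    }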
|
/linux/mm/kasan/
init.c

    static inline bool kasan_p4d_table(pgd_t pgd)
    {
        return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));
    }
    …
    static inline bool kasan_p4d_table(pgd_t pgd)
    …

    static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr, …
    {
        p4d_t *p4d = p4d_offset(pgd, addr);

    /* kasan_populate_early_shadow() */
        pgd_t *pgd = pgd_offset_k(addr);
        …
        pgd_populate_kernel(addr, pgd, …
        p4d = p4d_offset(pgd, addr);
        …
        if (pgd_none(*pgd)) {
            …
            if (!p4d_alloc(&init_mm, pgd, addr))
    [all …]
|
/linux/arch/x86/power/
hibernate_32.c

    static pmd_t *resume_one_md_table_init(pgd_t *pgd)
    {
        …
        set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
        p4d = p4d_offset(pgd, 0);
        …
        p4d = p4d_offset(pgd, 0);

    /* resume_physical_mapping_init() */
        pgd_t *pgd;
        …
        pgd = pgd_base + pgd_idx;
        …
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
            pmd = resume_one_md_table_init(pgd);

    /* set_up_temporary_text_mapping() */
        pgd_t *pgd;
        …
        pgd = pgd_base + pgd_index(restore_jump_address);
    [all …]
|
hibernate_64.c

    static int set_up_temporary_text_mapping(pgd_t *pgd)
    …
        set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
        …
        set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);

    /* set_up_temporary_mappings() */
        pgd_t *pgd;
        …
        pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
        if (!pgd)
            …
        result = set_up_temporary_text_mapping(pgd);
        …
        result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
        …
        temp_pgt = __pa(pgd);
|
/linux/arch/sh/mm/
fault.c

    /* show_pte() */
        pgd_t *pgd;
        …
        pgd = mm->pgd;
        …
        pgd = get_TTB();
        …
        if (unlikely(!pgd))
            pgd = swapper_pg_dir;
        …
        pr_alert("pgd = %p\n", pgd);
        pgd += pgd_index(addr);
        pr_alert("[%08lx] *pgd=%0*llx", addr, (u32)(sizeof(*pgd) * 2),
                 (u64)pgd_val(*pgd));
        …
        if (pgd_none(*pgd))
    [all …]
|
pgtable.c

    /* pgd_ctor() */
        pgd_t *pgd = x;

        memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
        memcpy(pgd + USER_PTRS_PER_PGD, …

    void pgd_free(struct mm_struct *mm, pgd_t *pgd)
    {
        kmem_cache_free(pgd_cachep, pgd);
    }
|
/linux/arch/x86/kernel/
machine_kexec_32.c

    free_pages((unsigned long)image->arch.pgd, pgd_allocation_order());
    image->arch.pgd = NULL;
    …
    image->arch.pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, …
    …
    if (!image->arch.pgd || …

    static void machine_kexec_page_table_set_one(pgd_t *pgd, pmd_t *pmd, pte_t *pte, …
    {
        …
        pgd += pgd_index(vaddr);
        …
        if (!(pgd_val(*pgd) & _PAGE_PRESENT))
            set_pgd(pgd, __pgd(__pa(pmd) | _PAGE_PRESENT));
        …
        p4d = p4d_offset(pgd, vaddr);

    /* machine_kexec_prepare_page_tables() */
        … image->arch.pgd, pmd, image->arch.pte0,
    [all …]
|
/linux/arch/hexagon/include/asm/
pgalloc.h

    /* pgd_alloc() */
        pgd_t *pgd;

        pgd = __pgd_alloc(mm, 0);
        …
        memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD*sizeof(pgd_t));
        …
        mm->context.ptbase = __pa(pgd);
        …
        return pgd;

    /* pmd_populate_kernel() */
        pmdindex = (pgd_t *)pmd - mm->pgd;
        ppmd = (pmd_t *)current->active_mm->pgd + pmdindex;
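The pmd_populate_kernel() fragment relies on hexagon folding the pmd level into the pgd: subtracting mm->pgd from the entry pointer recovers the slot index, which is then applied to the active mm's table so both see the new kernel mapping. That pointer-subtraction trick with plain arrays (illustrative only):

    /*
     * Toy index recovery: given a pointer to a slot in one table,
     * recover its index by pointer subtraction and address the same
     * slot in another table.
     */
    #include <assert.h>

    #define TOY_PTRS 64

    int main(void)
    {
        unsigned long mm_root[TOY_PTRS] = { 0 };
        unsigned long active_root[TOY_PTRS] = { 0 };

        unsigned long *slot = &mm_root[17];     /* entry handed to us */
        long index = slot - mm_root;            /* recover the index */

        active_root[index] = 0x77;              /* same slot, other table */
        assert(index == 17 && active_root[17] == 0x77);
        return 0;
    }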
|
/linux/arch/loongarch/mm/
hugetlbpage.c

    /* huge_pte_alloc() */
        pgd_t *pgd;
        …
        pgd = pgd_offset(mm, addr);
        p4d = p4d_alloc(mm, pgd, addr);

    /* huge_pte_offset() */
        pgd_t *pgd;
        …
        pgd = pgd_offset(mm, addr);
        if (pgd_present(pgdp_get(pgd))) {
            p4d = p4d_offset(pgd, addr);
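huge_pte_alloc() chains the *_alloc() helpers, each of which returns the existing lower-level table or allocates and installs one, while huge_pte_offset() uses the read-only *_offset() walk that returns NULL when a level is absent. A toy allocate-on-demand walk in that shape, with two levels standing in for five:

    /*
     * Toy allocate-on-demand walk: each call returns the existing
     * child table or creates it, so callers just chain the calls.
     */
    #include <assert.h>
    #include <stdlib.h>

    #define TOY_PTRS 16

    struct toy_table { void *slot[TOY_PTRS]; };

    static struct toy_table *toy_child_alloc(struct toy_table *parent,
                                             unsigned long idx)
    {
        if (!parent->slot[idx])
            parent->slot[idx] = calloc(1, sizeof(struct toy_table));
        return parent->slot[idx];       /* NULL only if calloc failed */
    }

    int main(void)
    {
        struct toy_table root = { { 0 } };

        struct toy_table *a = toy_child_alloc(&root, 3);
        struct toy_table *b = toy_child_alloc(&root, 3);

        assert(a && a == b);            /* second walk reuses the table */
        free(a);
        return 0;
    }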
|
/linux/arch/x86/platform/efi/
efi_64.c

    /* efi_alloc_page_tables() */
        pgd_t *pgd, *efi_pgd;
        …
        pgd = efi_pgd + pgd_index(EFI_VA_END);
        p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
        …
        efi_mm.pgd = efi_pgd;
        …
        free_page((unsigned long)pgd_page_vaddr(*pgd));

    /* efi_sync_low_kernel_mappings() */
        pgd_t *efi_pgd = efi_mm.pgd;

    /* efi_setup_page_tables() */
        pgd_t *pgd = efi_mm.pgd;
        …
        if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
        …
        if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
        …
        if (sev_es_efi_map_ghcbs_cas(pgd)) {
    [all …]
|
/linux/arch/x86/boot/startup/
map_kernel.c

    /* __startup_64() */
        pgdval_t *pgd;
        …
        pgd = rip_rel_ptr(early_top_pgt);
        pgd[pgd_index(__START_KERNEL_map)] += load_delta;
        …
        pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE;
        …
        pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
        pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;
        …
        pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
        pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
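This early boot path cannot use the normal accessors yet, so the tables are treated as plain pgdval_t arrays: an entry is "next table address | flags", and relocating the kernel means adding load_delta straight to the stored value. A sketch of that raw-entry arithmetic with invented flag values:

    /*
     * Toy raw-entry arithmetic, not real boot code: compose an entry
     * as address | flags, relocate by plain addition, and check that
     * the flag bits survive while the address field moves.
     */
    #include <assert.h>
    #include <stdint.h>

    #define TOY_PAGE_TABLE_FLAGS    0x063ULL    /* made-up low-bit flags */
    #define TOY_ADDR_MASK           (~0xfffULL)

    int main(void)
    {
        uint64_t pgd[4] = { 0 };
        uint64_t p4d_phys = 0x200000;       /* pretend next-level table */
        uint64_t load_delta = 0x1000000;    /* pretend relocation shift */

        pgd[1] = p4d_phys | TOY_PAGE_TABLE_FLAGS;
        pgd[1] += load_delta;               /* address moves, flags stay */

        assert((pgd[1] & TOY_ADDR_MASK) == p4d_phys + load_delta);
        assert((pgd[1] & ~TOY_ADDR_MASK) == TOY_PAGE_TABLE_FLAGS);
        return 0;
    }

This only works because load_delta is page-aligned, so the addition never carries into the flag bits.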
|
sme.c

    pgd_t *pgd;         /* struct member */

    /* sme_clear_pgd() */
        pgd_p = ppd->pgd + pgd_index(ppd->vaddr);

    /* sme_prepare_pgd() */
        pgd_t *pgd;
        …
        pgd = ppd->pgd + pgd_index(ppd->vaddr);
        if (pgd_none(*pgd)) {
            …
            set_pgd(pgd, __pgd(PGD_FLAGS | __pa(p4d)));
        }
        …
        p4d = p4d_offset(pgd, ppd->vaddr);

    /* sme_encrypt_kernel() */
        ppd.pgd = (pgd_t *)native_read_cr3_pa();
        …
        ppd.pgd = ppd.pgtable_area;
        memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
    [all …]
|
/linux/arch/riscv/mm/
fault.c

    /* show_pte() */
        pgd_t *pgdp, pgd;
        …
        … mm == &init_mm ? (u64)__pa_symbol(mm->pgd) : virt_to_phys(mm->pgd));
        …
        pgd = pgdp_get(pgdp);
        pr_alert("[%016lx] pgd=%016lx", addr, pgd_val(pgd));
        if (pgd_none(pgd) || pgd_bad(pgd) || pgd_leaf(pgd))

    /* vmalloc_fault() */
        pgd_t *pgd, *pgd_k;
        …
        pgd = (pgd_t *)pfn_to_virt(pfn) + index;
        pgd_k = init_mm.pgd + index;
        …
        set_pgd(pgd, pgdp_get(pgd_k));
|
/linux/arch/parisc/mm/
hugetlbpage.c

    /* huge_pte_alloc() */
        pgd_t *pgd;
        …
        pgd = pgd_offset(mm, addr);
        p4d = p4d_offset(pgd, addr);

    /* huge_pte_offset() */
        pgd_t *pgd;
        …
        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
            p4d = p4d_offset(pgd, addr);
|
/linux/arch/m68k/include/asm/
mmu_context.h

    static inline void set_context(mm_context_t context, pgd_t *pgd)
    …
        set_context(tsk->mm->context, next->pgd);       /* switch_mm() */
    …
        set_context(mm->context, mm->pgd);              /* activate_mm() */

    /* load_ksp_mmu() */
        pgd_t *pgd;
        …
        pgd = pgd_offset(mm, mmuar);
        if (pgd_none(*pgd))
            …
        p4d = p4d_offset(pgd, mmuar);
    …
        mm->context = virt_to_phys(mm->pgd);            /* init_new_context() */
    …
        next_mm->context = virt_to_phys(next_mm->pgd);  /* activate_mm() */
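On this m68k variant the mm context is literally the physical address of the pgd, so switching address spaces amounts to loading that value into the root-pointer register via set_context(). A toy model with a faked virt-to-phys mapping and a variable standing in for the register:

    /*
     * Toy "context is phys(pgd)" model: activating an mm records the
     * fake physical address of its root and loads it into a variable
     * that stands in for the hardware root-pointer register.
     */
    #include <assert.h>
    #include <stdint.h>

    static uintptr_t mmu_root_reg;          /* fake root-pointer register */

    struct toy_mm {
        unsigned long context;
        unsigned long pgd[16];
    };

    static uintptr_t toy_virt_to_phys(const void *v)
    {
        return (uintptr_t)v + 0x1000;       /* pretend fixed linear offset */
    }

    static void toy_activate_mm(struct toy_mm *mm)
    {
        mm->context = toy_virt_to_phys(mm->pgd);
        mmu_root_reg = mm->context;         /* what set_context() loads */
    }

    int main(void)
    {
        struct toy_mm mm_a = { 0 }, mm_b = { 0 };

        toy_activate_mm(&mm_a);
        toy_activate_mm(&mm_b);
        assert(mmu_root_reg == toy_virt_to_phys(mm_b.pgd));
        return 0;
    }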
|