/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Make module code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#ifndef __ASSEMBLY__

#define PFN_PTE_SHIFT		PTE_RPN_SHIFT

void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte, unsigned int nr);
#define set_ptes set_ptes
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#ifndef MAX_PTRS_PER_PGD
#define MAX_PTRS_PER_PGD PTRS_PER_PGD
#endif

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
}

/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags;

	pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
	return __pgprot(pte_flags);
}

static inline pgprot_t pgprot_nx(pgprot_t prot)
{
	return pte_pgprot(pte_exprotect(__pte(pgprot_val(prot))));
}
#define pgprot_nx pgprot_nx

#ifndef pmd_page_vaddr
static inline const void *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & ~PMD_MASKED_BITS);
}
#define pmd_page_vaddr pmd_page_vaddr
#endif
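
/*
 * Illustrative sketch (not part of this header's API) of how the
 * accessors above compose; "page" and "prot" are hypothetical locals:
 *
 *	pte_t pte = mk_pte(page, prot);
 *
 *	pte_pfn(pte) == page_to_pfn(page);	// the RPN field round-trips
 *	pte_page(pte) == page;			// via pfn_to_page()
 *
 * pte_pgprot(pte) recovers every bit outside PTE_RPN_MASK, i.e. the
 * protection/flag bits that came in through "prot".
 */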
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);
void poking_init(void);

extern unsigned long ioremap_bot;
extern const pgprot_t protection_map[16];

/* Can we use this in KVM? */
unsigned long vmalloc_to_phys(void *vmalloc_addr);

void pgtable_cache_add(unsigned int shift);

pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);

#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
static inline void mark_initmem_nx(void) { }
#endif

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty);

pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
				pgprot_t vma_prot);

struct file;
static inline pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
					    unsigned long size, pgprot_t vma_prot)
{
	return __phys_mem_access_prot(pfn, size, vma_prot);
}
#define __HAVE_PHYS_MEM_ACCESS_PROT

void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	if ((mmu_has_feature(MMU_FTR_HPTE_TABLE) && !radix_enabled()) ||
	    (IS_ENABLED(CONFIG_PPC_E500) && IS_ENABLED(CONFIG_HUGETLB_PAGE)))
		__update_mmu_cache(vma, address, ptep);
}

/*
 * When used, PTE_FRAG_NR is defined in subarch pgtable.h,
 * so we are sure it is included when arriving here.
 */
#ifdef PTE_FRAG_NR
static inline void *pte_frag_get(mm_context_t *ctx)
{
	return ctx->pte_frag;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
	ctx->pte_frag = p;
}
#else
#define PTE_FRAG_NR		1
#define PTE_FRAG_SIZE_SHIFT	PAGE_SHIFT
#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)

static inline void *pte_frag_get(mm_context_t *ctx)
{
	return NULL;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
}
#endif

#define pmd_pgtable pmd_pgtable
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}
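
/*
 * Illustrative sketch (hypothetical caller, not kernel code) of the
 * fault-path sequence described above update_mmu_cache_range(): install
 * the new PTE, then give the MMU cache a chance to preload. "vma",
 * "addr", "ptep" and "entry" are assumed locals of a fault handler:
 *
 *	set_ptes(vma->vm_mm, addr, ptep, entry, 1);
 *	update_mmu_cache(vma, addr, ptep);
 *
 * Per the condition in update_mmu_cache_range(), the second call only
 * reaches __update_mmu_cache() on hash-table MMUs (not radix), or on
 * e500 when hugetlb pages are configured; elsewhere it is a no-op.
 */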
#ifdef CONFIG_PPC64
int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size);
bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
			   unsigned long page_size);
/*
 * mm/memory_hotplug.c:mhp_supports_memmap_on_memory goes into details
 * of some of the restrictions. We don't check for PMD_SIZE because our
 * vmemmap allocation code can fall back correctly. The pageblock
 * alignment requirement is met using altmap->reserve blocks.
 */
#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory
static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
{
	if (!radix_enabled())
		return false;
	/*
	 * With a 4K page size and 2M PMD_SIZE, memory block sizes of
	 * 128MB and up align nicely, so align things with PMD_SIZE.
	 */
	if (IS_ENABLED(CONFIG_PPC_4K_PAGES))
		return IS_ALIGNED(vmemmap_size, PMD_SIZE);
	return true;
}

#endif /* CONFIG_PPC64 */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */
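
/*
 * Worked example (illustrative, assuming a 64-byte struct page) for the
 * PMD_SIZE alignment check in arch_supports_memmap_on_memory() above:
 * a 128MB memory block with 4K pages covers 128MB / 4KB = 32768 pages,
 * so its vmemmap needs 32768 * 64 bytes = 2MB, which is exactly
 * PMD_SIZE-aligned and therefore eligible for memmap-on-memory.
 */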