/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__

/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in pgtable-hash64-*.h
 */
#define H_PTE_NONE_MASK		_PAGE_HPTEFLAGS

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif

/*
 * Size of EA range mapped by our pagetables.
 */
#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_PPC_64K_PAGES)
/*
 * Only with hash 64k do we need to use the second half of the pmd page
 * table to store a pointer to the deposited pgtable_t.
 */
#define H_PMD_CACHE_INDEX	(H_PMD_INDEX_SIZE + 1)
#else
#define H_PMD_CACHE_INDEX	H_PMD_INDEX_SIZE
#endif
/*
 * Define the address range of the kernel non-linear virtual area.
 */
#define H_KERN_VIRT_START	ASM_CONST(0xD000000000000000)
#define H_KERN_VIRT_SIZE	ASM_CONST(0x0000400000000000) /* 64T */

/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies half of it on hash CPUs and a quarter of it on Book3E
 * (we keep a quarter for the virtual memmap).
 */
#define H_VMALLOC_START		H_KERN_VIRT_START
#define H_VMALLOC_SIZE		ASM_CONST(0x380000000000) /* 56T */
#define H_VMALLOC_END		(H_VMALLOC_START + H_VMALLOC_SIZE)

#define H_KERN_IO_START		H_VMALLOC_END

/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(H_VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)

/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs.
 */
#define H_VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7

#define H_PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#ifndef __ASSEMBLY__
#define hash__pmd_bad(pmd)	(pmd_val(pmd) & H_PMD_BAD_BITS)
#define hash__pud_bad(pud)	(pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) == 0);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void hash__mark_rodata_ro(void);
extern void hash__mark_initmem_nx(void);
#endif

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
/* Atomic PTE updates */
static inline unsigned long hash__pte_update(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, unsigned long clr,
					     unsigned long set,
					     int huge)
{
	__be64 old_be, tmp_be;
	unsigned long old;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	old = be64_to_cpu(old_be);
	if (old & H_PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);

	return old;
}
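
/*
 * Illustrative sketch, not part of the original header: a typical
 * caller clears and/or sets Linux PTE bits through hash__pte_update()
 * and relies on it for the hash-flush bookkeeping. A hypothetical
 * clear-and-test of the dirty bit would look roughly like:
 *
 *	old = hash__pte_update(mm, addr, ptep, _PAGE_DIRTY, 0, 0);
 *	was_dirty = !!(old & _PAGE_DIRTY);
 *
 * The ldarx/stdcx. loop above retries while H_PAGE_BUSY is set, so
 * the read-modify-write cannot race with a concurrent hash-table
 * miss handler working on the same PTE.
 */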

/*
 * Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry.
 */
static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
	:"cc");
}

static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
{
	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline int hash__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
}

unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
		int ssize, real_pte_t rpte, unsigned int subpg_index);

/*
 * This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors; it's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * This just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

extern int hash__map_kernel_page(unsigned long ea, unsigned long pa,
				 unsigned long flags);
extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
						  unsigned long page_size,
						  unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
					 unsigned long page_size);

int hash__create_section_mapping(unsigned long start, unsigned long end);
int hash__remove_section_mapping(unsigned long start, unsigned long end);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */