/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 */

#ifndef _S390_PAGE_H
#define _S390_PAGE_H

#include <linux/const.h>
#include <asm/types.h>
#include <asm/asm.h>

#include <vdso/page.h>

#define PAGE_DEFAULT_ACC	_AC(0, UL)
/* storage-protection override */
#define PAGE_SPO_ACC		9
#define PAGE_DEFAULT_KEY	(PAGE_DEFAULT_ACC << 4)

#define HPAGE_SHIFT		20
#define HPAGE_SIZE		(1UL << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		2

#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define ARCH_HAS_HUGE_PTE_TYPE
#define ARCH_HAS_PREPARE_HUGEPAGE
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH

#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#include <asm/setup.h>
#ifndef __ASSEMBLY__

void __storage_key_init_range(unsigned long start, unsigned long end);

static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
	if (PAGE_DEFAULT_KEY != 0)
		__storage_key_init_range(start, end);
}

#define clear_page(page)	memset((page), 0, PAGE_SIZE)

/*
 * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
 * bypass caches when copying a page. Especially when copying huge pages
 * this keeps L1 and L2 data caches alive.
 */
static inline void copy_page(void *to, void *from)
{
	union register_pair dst, src;

	dst.even = (unsigned long) to;
	dst.odd = 0x1000;		/* destination length: one 4KB page */
	src.even = (unsigned long) from;
	src.odd = 0xb0001000;		/* pad byte 0xb0 + source length 0x1000 */

	asm volatile(
		"	mvcl	%[dst],%[src]"
		: [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
		: : "memory", "cc");
}

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
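/*
 * Illustrative sketch only, not part of this header: a hypothetical
 * example_copy_huge_page() helper showing why the cache-bypassing
 * copy_page() above matters for huge pages, which on s390 span
 * HPAGE_SIZE (1 MB) and are therefore copied as HPAGE_SIZE / PAGE_SIZE
 * consecutive base pages.
 */
static inline void example_copy_huge_page(void *to, void *from)
{
	unsigned long i;

	for (i = 0; i < HPAGE_SIZE / PAGE_SIZE; i++)
		copy_page(to + i * PAGE_SIZE, from + i * PAGE_SIZE);
}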
/*
 * These are used to make use of C type-checking..
 */

typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgste; } pgste_t;
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long p4d; } p4d_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef pte_t *pgtable_t;

#define pgprot_val(x)	((x).pgprot)
#define pgste_val(x)	((x).pgste)

static inline unsigned long pte_val(pte_t pte)
{
	return pte.pte;
}

static inline unsigned long pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}

static inline unsigned long pud_val(pud_t pud)
{
	return pud.pud;
}

static inline unsigned long p4d_val(p4d_t p4d)
{
	return p4d.p4d;
}

static inline unsigned long pgd_val(pgd_t pgd)
{
	return pgd.pgd;
}

#define __pgste(x)	((pgste_t) { (x) } )
#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pud(x)	((pud_t) { (x) } )
#define __p4d(x)	((p4d_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

static inline void page_set_storage_key(unsigned long addr,
					unsigned char skey, int mapped)
{
	if (!mapped)
		asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
			     : : "d" (skey), "a" (addr));
	else
		asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}

static inline unsigned char page_get_storage_key(unsigned long addr)
{
	unsigned char skey;

	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
	return skey;
}

static inline int page_reset_referenced(unsigned long addr)
{
	int cc;

	asm volatile(
		"	rrbe	0,%[addr]\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc)
		: [addr] "a" (addr)
		: CC_CLOBBER);
	return CC_TRANSFORM(cc);
}

/* Bits in the storage key */
#define _PAGE_CHANGED		0x02	/* HW changed bit		*/
#define _PAGE_REFERENCED	0x04	/* HW referenced bit		*/
#define _PAGE_FP_BIT		0x08	/* HW fetch protection bit	*/
#define _PAGE_ACC_BITS		0xf0	/* HW access control bits	*/

struct page;
struct folio;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);

static inline int devmem_is_allowed(unsigned long pfn)
{
	return 0;
}

#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE

int arch_make_folio_accessible(struct folio *folio);
#define HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE

struct vm_layout {
	unsigned long kaslr_offset;
	unsigned long kaslr_offset_phys;
	unsigned long identity_base;
	unsigned long identity_size;
};

extern struct vm_layout vm_layout;

#define __kaslr_offset		vm_layout.kaslr_offset
#define __kaslr_offset_phys	vm_layout.kaslr_offset_phys
#define __identity_base		vm_layout.identity_base
#define ident_map_size		vm_layout.identity_size

static inline unsigned long kaslr_offset(void)
{
	return __kaslr_offset;
}

extern int __kaslr_enabled;
static inline int kaslr_enabled(void)
{
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return __kaslr_enabled;
	return 0;
}

#define __PAGE_OFFSET		__identity_base
#define PAGE_OFFSET		__PAGE_OFFSET

#ifdef __DECOMPRESSOR

#define __pa_nodebug(x)		((unsigned long)(x))
#define __pa(x)			__pa_nodebug(x)
#define __pa32(x)		__pa(x)
#define __va(x)			((void *)(unsigned long)(x))

#else /* __DECOMPRESSOR */

static inline unsigned long __pa_nodebug(unsigned long x)
{
	if (x < __kaslr_offset)
		return x - __identity_base;
	return x - __kaslr_offset + __kaslr_offset_phys;
}

#ifdef CONFIG_DEBUG_VIRTUAL

unsigned long __phys_addr(unsigned long x, bool is_31bit);

#else /* CONFIG_DEBUG_VIRTUAL */

static inline unsigned long __phys_addr(unsigned long x, bool is_31bit)
{
	return __pa_nodebug(x);
}

#endif /* CONFIG_DEBUG_VIRTUAL */

#define __pa(x)		__phys_addr((unsigned long)(x), false)
#define __pa32(x)	__phys_addr((unsigned long)(x), true)
#define __va(x)		((void *)((unsigned long)(x) + __identity_base))

#endif /* __DECOMPRESSOR */

#define phys_to_pfn(phys)	((phys) >> PAGE_SHIFT)
#define pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)

#define phys_to_page(phys)	pfn_to_page(phys_to_pfn(phys))
#define phys_to_folio(phys)	page_folio(phys_to_page(phys))
#define page_to_phys(page)	pfn_to_phys(page_to_pfn(page))
#define folio_to_phys(folio)	pfn_to_phys(folio_pfn(folio))

static inline void *pfn_to_virt(unsigned long pfn)
{
	return __va(pfn_to_phys(pfn));
}

static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return phys_to_pfn(__pa(kaddr));
}

#define pfn_to_kaddr(pfn)	pfn_to_virt(pfn)
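/*
 * Illustrative sketch only, not part of this header: a hypothetical
 * example_identity_roundtrip() helper. Outside the decompressor, a
 * page-aligned address inside the identity mapping translates to a pfn
 * and back to the same address, while a kernel image address at or above
 * __kaslr_offset comes back as its identity-mapped alias, because
 * __pa_nodebug() translates it via __kaslr_offset_phys but __va() always
 * adds __identity_base.
 */
static inline int example_identity_roundtrip(const void *kaddr)
{
	return pfn_to_virt(virt_to_pfn(kaddr)) == kaddr;
}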
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define page_to_virt(page)	pfn_to_virt(page_to_pfn(page))

#define virt_addr_valid(kaddr)	pfn_valid(phys_to_pfn(__pa_nodebug((unsigned long)(kaddr))))

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC

#endif /* !__ASSEMBLY__ */

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#define AMODE31_SIZE		(3 * PAGE_SIZE)

#define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)
#define __NO_KASLR_START_KERNEL	CONFIG_KERNEL_IMAGE_BASE
#define __NO_KASLR_END_KERNEL	(__NO_KASLR_START_KERNEL + KERNEL_IMAGE_SIZE)

#define TEXT_OFFSET		0x100000

#endif /* _S390_PAGE_H */