/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/*  asm/pgtable.h:  Defines and functions used to work
 *                        with Sparc page tables.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#define PMD_SHIFT		18
#define PMD_SIZE        	(1UL << PMD_SHIFT)
#define PMD_MASK        	(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr) 	(((__addr) + ~PMD_MASK) & PMD_MASK)

#define PGDIR_SHIFT     	24
#define PGDIR_SIZE      	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK      	(~(PGDIR_SIZE-1))
#define PGDIR_ALIGN(__addr) 	(((__addr) + ~PGDIR_MASK) & PGDIR_MASK)
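/* Worked example: ~PMD_MASK == PMD_SIZE - 1, so PMD_ALIGN() rounds an
 * address up to the next 256 KB (1 << 18) boundary, e.g.
 * PMD_ALIGN(0x40001) == 0x80000.  PGDIR_ALIGN() does the same for
 * 16 MB (1 << 24) boundaries.
 */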

#ifndef __ASSEMBLER__
#include <asm-generic/pgtable-nopud.h>

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>


struct vm_area_struct;
struct page;

void load_mmu(void);
unsigned long calc_highpages(void);
unsigned long __init bootmem_init(unsigned long *pages_avail);

#define pte_ERROR(e)   __builtin_trap()
#define pmd_ERROR(e)   __builtin_trap()
#define pgd_ERROR(e)   __builtin_trap()

#define PTRS_PER_PTE    	64
#define PTRS_PER_PMD    	64
#define PTRS_PER_PGD    	256
#define USER_PTRS_PER_PGD	(PAGE_OFFSET / PGDIR_SIZE)
#define PTE_SIZE		(PTRS_PER_PTE*4)
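/* Sanity check of the geometry: 64 PTEs x 4 KB pages = 256 KB per PMD
 * entry (matching PMD_SHIFT 18); 64 PMDs x 256 KB = 16 MB per PGD entry
 * (matching PGDIR_SHIFT 24); 256 PGD entries x 16 MB cover the full 4 GB.
 * PTE_SIZE is 64 entries x 4 bytes = 256 bytes per PTE table.
 */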

#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init_mm.
 * srmmu.c will assign the real one (which is dynamically sized).
 */
#define swapper_pg_dir NULL

void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/* The first physical page can be anywhere; the following is needed so that
 * va-->pa and vice versa conversions work properly without a performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;
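/* On sparc32 these make the conversions simple linear offsets,
 * e.g. __pa(v) == v - PAGE_OFFSET + phys_base (see asm/page.h for the
 * authoritative definitions).
 */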

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the MMU and the CPU are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" :
			"=&r" (value) : "0" (value), "r" (addr) : "memory");
	return value;
}
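/* Constraint notes: "0" forces the input value into the same register as
 * the "=&r" early-clobber output, so the single SPARC V8 SWAP instruction
 * atomically exchanges that register with the word at [addr]; the "memory"
 * clobber keeps the compiler from caching page-table contents across it.
 */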

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

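/* SRMMU entries hold the 36-bit physical address shifted right by 4, so
 * the top nibble of an entry carries physical address bits 35:32 - the
 * I/O space number tested by srmmu_device_memory() above.  That is why
 * the accessors below shift by (PAGE_SHIFT - 4) or scale the masked
 * value back up by 16.
 */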
static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page(pmd_pfn(pmd));
}

static inline unsigned long __pmd_page(pmd_t pmd)
{
	unsigned long v;

	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();

	v = pmd_val(pmd) & SRMMU_PTD_PMASK;
	return (unsigned long)__nocache_va(v << 4);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	unsigned long v = pmd_val(pmd) & SRMMU_PTD_PMASK;
	return (unsigned long)__nocache_va(v << 4);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	if (srmmu_device_memory(pud_val(pud))) {
		return (pmd_t *)~0;
	} else {
		unsigned long v = pud_val(pud) & SRMMU_PTD_PMASK;
		return (pmd_t *)__nocache_va(v << 4);
	}
}

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}
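/* The low two bits of an SRMMU entry are its type field: pte_present()
 * checks for type PTE, while pmd_present() and pud_present() below check
 * for type PTD (a pointer to the next-level table).
 */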

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pte((pte_t *)&pmd_val(*pmdp), __pte(0));
}

static inline int pud_none(pud_t pud)
{
	return !(pud_val(pud) & 0xFFFFFFF);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pud_present(pud_t pud)
{
	return ((pud_val(pud) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pte((pte_t *)pudp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}

#define PFN_PTE_SHIFT			(PAGE_SHIFT - 4)

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte((pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot));
}
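/* With 4 KB pages PFN_PTE_SHIFT is 8: the pfn is shifted up to a full
 * physical address (pfn << 12) and then down by 4 into the entry format,
 * e.g. pfn_pte(0x12345, prot) stores 0x1234500 | pgprot_val(prot).
 */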

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just copy the PTE over
		 * directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> PFN_PTE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}
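/* mk_pte_io() puts the 4-bit I/O space number at entry bits 31:28, i.e.
 * physical address bits 35:32.  Example: space 0xf over physical address
 * 0x10000000 yields (0x10000000 >> 4) | (0xf << 28) == 0xf1000000, plus
 * the protection bits.
 */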

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
	return prot;
}

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}
/* Only used by the huge vmap code; should never be called. */
#define pud_page(pud)			NULL

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT     0x1
#define FAULT_CODE_WRITE    0x2
#define FAULT_CODE_USER     0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)
#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
                      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <-------------- offset ---------------> < type -> E 0 0 0 0 0 0
 */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}
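/* Round trip: since the type and offset fields in the diagram above do
 * not overlap, __swp_type(__swp_entry(t, o)) == t and
 * __swp_offset(__swp_entry(t, o)) == o for any in-range pair, and the low
 * entry-type bits stay zero so the result is never a present PTE.
 */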

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & SRMMU_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_SWP_EXCLUSIVE);
}

static inline unsigned long
__get_phys (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return (srmmu_get_pte (addr) >> 28);
	default:
		return -1;
	}
}

/*
 * For both sparc32 and sparc64, the pfn in io_remap_pfn_range() carries
 * <iospace> in its high 4 bits.  These macros/functions put it there or
 * get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffUL)
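/* Example (sparc32, where BITS_PER_LONG == 32): MK_IOSPACE_PFN(2, 0x1000)
 * == 0x20001000; GET_IOSPACE() then recovers 2 and GET_PFN() recovers
 * 0x1000.
 */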

static inline unsigned long io_remap_pfn_range_pfn(unsigned long pfn,
		unsigned long size)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return phys_base >> PAGE_SHIFT;
}
#define io_remap_pfn_range_pfn io_remap_pfn_range_pfn
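/* This rebuilds the 36-bit physical page frame number: continuing the
 * example above with 4 KB pages, pfn 0x20001000 becomes offset
 * 0x1000 << 12 with space 2 at bit 32, i.e. physical address 0x201000000,
 * so the returned pfn is 0x201000.
 */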

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte(__ptep, __entry);				  \
		flush_tlb_page(__vma, __address);			  \
	}								  \
	__changed;							  \
})
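/* The PTE is written and the single TLB entry flushed only when the new
 * value actually differs from the old one; the expression evaluates to
 * whether an update happened.
 */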

#endif /* !(__ASSEMBLER__) */

#define VMALLOC_START           _AC(0xfe600000,UL)
#define VMALLOC_END             _AC(0xffc00000,UL)
#define MODULES_VADDR           VMALLOC_START
#define MODULES_END             VMALLOC_END

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#define pmd_pgtable(pmd)	((pgtable_t)__pmd_page(pmd))

#endif /* !(_SPARC_PGTABLE_H) */