#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/*  asm/pgtable.h:  Defines and functions used to work
 *                  with Sparc page tables.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#ifndef __ASSEMBLY__
#include <asm-generic/4level-fixup.h>

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>


struct vm_area_struct;
struct page;

void load_mmu(void);
unsigned long calc_highpages(void);
unsigned long __init bootmem_init(unsigned long *pages_avail);

#define pte_ERROR(e)   __builtin_trap()
#define pmd_ERROR(e)   __builtin_trap()
#define pgd_ERROR(e)   __builtin_trap()

#define PMD_SHIFT		22
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr)	(((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT		SRMMU_PGDIR_SHIFT
#define PGDIR_SIZE		SRMMU_PGDIR_SIZE
#define PGDIR_MASK		SRMMU_PGDIR_MASK
#define PTRS_PER_PTE		1024
#define PTRS_PER_PMD		SRMMU_PTRS_PER_PMD
#define PTRS_PER_PGD		SRMMU_PTRS_PER_PGD
#define USER_PTRS_PER_PGD	(PAGE_OFFSET / SRMMU_PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL
#define PTE_SIZE		(PTRS_PER_PTE*4)
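
/* A rough sketch of the 32-bit virtual address split implied by the
 * constants above (assuming the usual SRMMU values PGDIR_SHIFT = 24
 * and PAGE_SHIFT = 12, per pgtsrmmu.h and page.h):
 *
 *	31       24 23   22 21          12 11           0
 *	+----------+-------+--------------+--------------+
 *	| pgd index| pmd ix|  pte index   | page offset  |
 *	+----------+-------+--------------+--------------+
 *
 * so each pgd entry covers 16MB, each Linux pmd entry covers
 * PMD_SIZE = 4MB, and each pte maps a 4KB page.  PTE_SIZE is the byte
 * size of one Linux pte table (1024 entries of 4 bytes each).
 */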

#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init_mm.
 * srmmu.c will assign the real one (which is dynamically sized). */
#define swapper_pg_dir NULL

void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/*         xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
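
/* A short note on the tables above: __Pxyz selects the protection for
 * private (MAP_PRIVATE) mappings and __Sxyz for shared (MAP_SHARED)
 * ones, the three digits being the x/w/r permission bits.  Writable
 * private mappings get PAGE_COPY so the first write traps and can be
 * handled as copy-on-write, while writable shared mappings really are
 * PAGE_SHARED.  The remaining write/execute combinations are simply
 * mapped to PAGE_READONLY here.
 */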

/* The first physical page can be anywhere; the following is needed so
 * that va-->pa and pa-->va conversions work properly, without a
 * performance hit, for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page;

#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the MMU and the CPU are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" :
			"=&r" (value) : "0" (value), "r" (addr) : "memory");
	return value;
}
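
/* Conceptually, the V8 swap above performs the following as one atomic
 * step (old value comes back in 'value', the new value lands in *addr):
 *
 *	unsigned long old = *addr;
 *	*addr = value;
 *	return old;
 *
 * Because the exchange is atomic, an MMU table walk that sets the
 * ref/mod bits cannot slip in between the load and the store.
 */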

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	if (srmmu_device_memory(pgd_val(pgd))) {
		return ~0;
	} else {
		unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
		return (unsigned long)__nocache_va(v << 4);
	}
}
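
/* The << 4 and >> (PAGE_SHIFT-4) shifts above follow from the SRMMU
 * table-pointer format: a PTD stores the physical address of the next
 * level shifted right by 4.  Roughly:
 *
 *	ptd  = (paddr >> 4) | SRMMU_ET_PTD;
 *	paddr = (ptd & SRMMU_PTD_PMASK) << 4;
 *	pfn  = paddr >> PAGE_SHIFT
 *	     = (ptd & SRMMU_PTD_PMASK) >> (PAGE_SHIFT - 4);
 *
 * srmmu_device_memory() catches entries whose top nibble is nonzero,
 * i.e. physical addresses that point outside ordinary RAM into an
 * I/O space.
 */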

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	int i;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
		set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}
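
/* Why the loop in pmd_clear(): a Linux pte table here has PTRS_PER_PTE
 * (1024) entries, but the SRMMU hardware walks tables of
 * SRMMU_REAL_PTRS_PER_PTE (64) entries, so one Linux pmd entry is
 * backed by 1024/64 = 16 hardware PTDs kept in pmdp->pmdv[], and all
 * of them have to be cleared.  (Values assume the usual definitions in
 * pgtsrmmu.h.)
 */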

static inline int pgd_none(pgd_t pgd)
{
	return !(pgd_val(pgd) & 0xFFFFFFF);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pgd_present(pgd_t pgd)
{
	return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pte((pte_t *)pgdp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour otherwise.
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}

#define pte_mkspecial(pte)	(pte)
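
/* Note that the pte_mk*() helpers above are pure value transformers:
 * they return a new pte rather than touching memory, so the usual
 * pattern (a sketch, not a quote from any particular caller) is to
 * chain them and write the result back with set_pte_at():
 *
 *	pte = pte_mkdirty(pte_mkyoung(pte));
 *	set_pte_at(mm, addr, ptep, pte);
 */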

#define pfn_pte(pfn, prot)		mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just copy the PTE over
		 * directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}
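
/* The arithmetic in pte_pfn(), spelled out: like a PTD, a PTE stores
 * the physical address shifted right by 4, so with 4KB pages
 * (PAGE_SHIFT = 12) the pfn is the stored value shifted right by
 * another 12 - 4 = 8 bits.
 */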

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}
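
/* mk_pte_io() composes, roughly, a 36-bit physical address: 'page' is
 * the 32-bit byte address within the I/O space and 'space' the 4-bit
 * space number, which after the >> 4 of the PTE format lands in bits
 * 31:28 of the entry, i.e. physical address bits 35:32.
 */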

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	prot &= ~__pgprot(SRMMU_CACHE);
	return prot;
}

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) pgd_page_vaddr(*dir) +
		((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address);

/*
 * This shortcut works on sun4m (and sun4d) because the nocache area is static.
 */
#define pte_offset_map(d, a)		pte_offset_kernel(d, a)
#define pte_unmap(pte)			do { } while (0)

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT     0x1
#define FAULT_CODE_WRITE    0x2
#define FAULT_CODE_USER     0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
                      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and de-code a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}
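
/* The helpers above are exact inverses as long as type and offset fit
 * their masks; a sketch of the round trip (masks and shifts per
 * pgtsrmmu.h):
 *
 *	swp_entry_t e = __swp_entry(type, offset);
 *	BUG_ON(__swp_type(e) != type || __swp_offset(e) != offset);
 *
 * A swap pte must also not look present to pte_present(), which
 * constrains where the type and offset fields may be placed.
 */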

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

static inline unsigned long
__get_phys(unsigned long addr)
{
	switch (sparc_cpu_model) {
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte(addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace(unsigned long addr)
{
	switch (sparc_cpu_model) {
	case sun4m:
	case sun4d:
		return (srmmu_get_pte(addr) >> 28);
	default:
		return -1;
	}
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
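
/* The >> 20 means sparc_valid_addr_bitmap tracks validity at 1MB
 * granularity: bit N covers physical addresses [N << 20, (N + 1) << 20).
 */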

/*
 * For sparc32 and sparc64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffUL)

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
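
/* Worked example of the encoding above (sparc32, BITS_PER_LONG = 32,
 * PAGE_SHIFT = 12): for space 0x2 and pfn 0x00345,
 *
 *	MK_IOSPACE_PFN(0x2, 0x00345) == 0x20000345
 *	GET_IOSPACE(0x20000345)      == 0x2
 *	GET_PFN(0x20000345)          == 0x00345
 *
 * io_remap_pfn_range() then rebuilds the 36-bit physical address as
 * (0x00345ULL << 12) | (0x2ULL << 32) and hands the resulting pfn to
 * remap_pfn_range().
 */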
#define io_remap_pfn_range io_remap_pfn_range

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address);			  \
	}								  \
	__changed;							  \
})
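
/* The point of the __changed test above: the pte (and with it the TLB
 * entry) is only rewritten and flushed when the new flags actually
 * differ, and the result tells the generic fault code whether anything
 * was updated.
 */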

#include <asm-generic/pgtable.h>

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START           _AC(0xfe600000,UL)
#define VMALLOC_END             _AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* !(_SPARC_PGTABLE_H) */