/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_PGTABLE_H
#define __ASM_CSKY_PGTABLE_H

#include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/addrspace.h>
#include <abi/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>

#define PGDIR_SHIFT		22
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD	(PAGE_OFFSET/PGDIR_SIZE)

/*
 * C-SKY uses a two-level paging structure:
 */

#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
#define PTRS_PER_PMD	1
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))
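
/*
 * Illustrative sketch only (hypothetical helper, not part of this
 * header's API): how a 32-bit virtual address splits under this
 * two-level layout, assuming 4 KiB pages (PAGE_SHIFT == 12), so each
 * pgd entry maps 4 MiB.
 */
static inline void __example_va_split(unsigned long addr,
				      unsigned long *pgd_idx,
				      unsigned long *pte_idx,
				      unsigned long *off)
{
	*pgd_idx = addr >> PGDIR_SHIFT;				/* bits [31:22] */
	*pte_idx = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);	/* bits [21:12] */
	*off     = addr & (PAGE_SIZE - 1);			/* bits [11:0] */
}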

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

#define PFN_PTE_SHIFT	PAGE_SHIFT
#define pmd_pfn(pmd)	(pmd_phys(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd)	(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
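/*
 * Clearing a kernel-space pte keeps _PAGE_GLOBAL set, and pte_none()
 * ignores that bit, so a cleared global entry still reads as none
 * (presumably so paired TLB entries keep matching global bits, as on
 * MIPS).
 */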
#define pte_clear(mm, addr, ptep)	set_pte((ptep), \
	(((unsigned int) addr >= PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_pfn(x)	((unsigned long)((x).pte_low >> PAGE_SHIFT))
#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
				| pgprot_val(prot))

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#define pte_page(x)			pfn_to_page(pte_pfn(x))
#define __mk_pte(page_nr, pgprot)	__pte(((page_nr) << PAGE_SHIFT) | \
					pgprot_val(pgprot))

/*
 * C-SKY only has VALID and DIRTY bits in hardware, so these two bits
 * are used to emulate PRESENT, READ, WRITE, EXEC, MODIFIED and
 * ACCESSED.
 */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)

#define PAGE_NONE	__pgprot(_PAGE_PROT_NONE)
#define PAGE_READ	__pgprot(_PAGE_BASE | _PAGE_READ | \
				_CACHE_CACHED)
#define PAGE_WRITE	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE | \
				_CACHE_CACHED)
#define PAGE_SHARED PAGE_WRITE
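
/*
 * The emulation keeps the hardware _PAGE_VALID/_PAGE_DIRTY bits in
 * sync with the software state: pte_mkyoung() below sets _PAGE_VALID
 * only for readable pages, and pte_mkdirty() sets _PAGE_DIRTY only
 * for writable pages, so any access to an old or clean page faults
 * first and lets software update the pte.
 */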

#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_VALID | \
				_PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \
				_PAGE_GLOBAL | \
				_CACHE_CACHED)

#define _PAGE_IOREMAP		(_PAGE_BASE | _PAGE_READ | _PAGE_VALID | \
				_PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \
				_PAGE_GLOBAL | \
				_CACHE_UNCACHED | _PAGE_SO)

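/*
 * Bits preserved across pte_modify(): everything except the protection
 * (PRESENT/READ/WRITE), the cache attributes and the global bit, i.e.
 * the pfn together with the software ACCESSED/MODIFIED/DIRTY/VALID
 * state.
 */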
#define _PAGE_CHG_MASK	(~(unsigned long) \
				(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				_CACHE_MASK | _PAGE_GLOBAL))

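/*
 * The software swap-entry encoding in <abi/pgtable-bits.h> presumably
 * reserves a 5-bit swap-type field; this build-time check catches any
 * core change to that width.
 */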
#define MAX_SWAPFILES_CHECK() \
		BUILD_BUG_ON(MAX_SWAPFILES_SHIFT != 5)

extern void load_pgd(unsigned long pg_dir);
extern pte_t invalid_pte_table[PTRS_PER_PTE];

static inline void set_pte(pte_t *p, pte_t pte)
{
	*p = pte;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
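	/*
	 * Assumption inferred from CONFIG_CPU_NEED_TLBSYNC: such CPUs
	 * refill the TLB from memory without snooping the dcache, so
	 * the updated entry must be written back first.
	 */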
	dcache_wb_line((u32)p);
#endif
	/* prevent out-of-order execution */
	smp_mb();
}

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	unsigned long ptr;

	ptr = pmd_val(pmd);

	return __va(ptr);
}

#define pmd_phys(pmd) pmd_val(pmd)

static inline void set_pmd(pmd_t *p, pmd_t pmd)
{
	*p = pmd;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
	/* prevent speculative execution */
	smp_mb();
}
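/*
 * An empty pmd points at invalid_pte_table (a page of invalid ptes)
 * rather than at zero, so a refill walk through an unpopulated pgd
 * slot still reads a well-formed, invalid pte; "none" therefore means
 * "equals that sentinel".
 */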
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == __pa(invalid_pte_table);
}

#define pmd_bad(pmd)	(pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) != __pa(invalid_pte_table));
}

static inline void pmd_clear(pmd_t *p)
{
	pmd_val(*p) = (__pa(invalid_pte_table));
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_read(pte_t pte)
{
	return pte.pte_low & _PAGE_READ;
}

static inline int pte_write(pte_t pte)
{
	return (pte).pte_low & _PAGE_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte).pte_low & _PAGE_MODIFIED;
}

static inline int pte_young(pte_t pte)
{
	return (pte).pte_low & _PAGE_ACCESSED;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_VALID);
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) |= _PAGE_VALID;
	return pte;
}
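
/*
 * Illustrative sketch only (hypothetical helper, not part of this
 * header's API): the emulated "accessed" round trip.  pte_mkold()
 * also drops the hardware _PAGE_VALID bit, so the next access faults;
 * the fault path then calls pte_mkyoung(), which restores _PAGE_VALID
 * for readable pages.
 */
static inline int __example_young_roundtrip(pte_t pte)
{
	pte = pte_mkold(pte);	/* age the page: next access will fault */
	pte = pte_mkyoung(pte);	/* what the fault path effectively does */
	return pte_young(pte);	/* non-zero again */
}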

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

/*
 * Macro to mark a page protection value as "uncacheable".  Note that
 * "protection" is really a misnomer here, as the value contains the
 * memory attribute bits, dirty bits and various other bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED | _PAGE_SO;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}
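
/*
 * Usage sketch (hypothetical driver code): a device mmap() handler
 * would typically combine these helpers with the generic
 * remap_pfn_range(), e.g.:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 *
 * pgprot_noncached() additionally sets _PAGE_SO for strong ordering,
 * which MMIO registers want; pgprot_writecombine() leaves it clear.
 */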

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot)));
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);

void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long address, pte_t *pte, unsigned int nr);
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#endif /* __ASM_CSKY_PGTABLE_H */