xref: /linux/arch/sparc/include/asm/pgtable_64.h (revision 2a2dfc869d3345ccdd91322b023f4b0da84acbe7)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * pgtable.h: SpitFire page table operations.
4  *
5  * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
6  * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7  */
8 
9 #ifndef _SPARC64_PGTABLE_H
10 #define _SPARC64_PGTABLE_H
11 
12 /* This file contains the functions and defines necessary to modify and use
13  * the SpitFire page tables.
14  */
15 
16 #include <asm-generic/pgtable-nop4d.h>
17 #include <linux/compiler.h>
18 #include <linux/const.h>
19 #include <asm/types.h>
20 #include <asm/spitfire.h>
21 #include <asm/asi.h>
22 #include <asm/adi.h>
23 #include <asm/page.h>
24 #include <asm/processor.h>
25 
26 /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
27  * The page copy blockops can use 0x6000000 to 0x8000000.
28  * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
29  * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
30  * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
31  * The vmalloc area spans 0x100000000 to 0x200000000.
32  * Since modules need to be in the low 32 bits of the address space,
33  * we place them right before the OBP area from 0x10000000 to 0xf0000000.
34  * There is a single static kernel PMD which maps from 0x0 to address
35  * 0x400000000.
36  */
37 #define	TLBTEMP_BASE		_AC(0x0000000006000000,UL)
38 #define	TSBMAP_8K_BASE		_AC(0x0000000008000000,UL)
39 #define	TSBMAP_4M_BASE		_AC(0x0000000008400000,UL)
40 #define MODULES_VADDR		_AC(0x0000000010000000,UL)
41 #define MODULES_LEN		_AC(0x00000000e0000000,UL)
42 #define MODULES_END		_AC(0x00000000f0000000,UL)
43 #define LOW_OBP_ADDRESS		_AC(0x00000000f0000000,UL)
44 #define HI_OBP_ADDRESS		_AC(0x0000000100000000,UL)
45 #define VMALLOC_START		_AC(0x0000000100000000,UL)
46 #define VMEMMAP_BASE		VMALLOC_END
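/* Editorial cross-check of the layout above (not part of the original source):
 * MODULES_VADDR + MODULES_LEN == 0x10000000 + 0xe0000000 == 0xf0000000, which
 * is both MODULES_END and LOW_OBP_ADDRESS, so the module area butts directly
 * against the OBP window; that window ends at HI_OBP_ADDRESS == 4GB, where
 * VMALLOC_START begins.
 */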
47 
48 /* PMD_SHIFT determines the size of the area a second-level page
49  * table can map
50  */
51 #define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
52 #define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
53 #define PMD_MASK	(~(PMD_SIZE-1))
54 #define PMD_BITS	(PAGE_SHIFT - 3)
55 
56 /* PUD_SHIFT determines the size of the area a third-level page
57  * table can map
58  */
59 #define PUD_SHIFT	(PMD_SHIFT + PMD_BITS)
60 #define PUD_SIZE	(_AC(1,UL) << PUD_SHIFT)
61 #define PUD_MASK	(~(PUD_SIZE-1))
62 #define PUD_BITS	(PAGE_SHIFT - 3)
63 
64 /* PGDIR_SHIFT determines what a fourth-level page table entry can map */
65 #define PGDIR_SHIFT	(PUD_SHIFT + PUD_BITS)
66 #define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
67 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
68 #define PGDIR_BITS	(PAGE_SHIFT - 3)
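/* Editorial worked example (assumes the usual 8K base page, PAGE_SHIFT == 13):
 * each level then resolves PAGE_SHIFT - 3 == 10 bits, i.e. 1024 eight-byte
 * entries per page-sized table, giving PMD_SHIFT == 23 (8MB per PMD entry),
 * PUD_SHIFT == 33 (8GB per PUD entry), PGDIR_SHIFT == 43 (8TB per PGD entry)
 * and PGDIR_SHIFT + PGDIR_BITS == 53, which the check below insists on.
 */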
69 
70 #if (MAX_PHYS_ADDRESS_BITS > PGDIR_SHIFT + PGDIR_BITS)
71 #error MAX_PHYS_ADDRESS_BITS exceeds what kernel page tables can support
72 #endif
73 
74 #if (PGDIR_SHIFT + PGDIR_BITS) != 53
75 #error Page table parameters do not cover virtual address space properly.
76 #endif
77 
78 #if (PMD_SHIFT != HPAGE_SHIFT)
79 #error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
80 #endif
81 
82 #ifndef __ASSEMBLY__
83 
84 extern unsigned long VMALLOC_END;
85 
86 #define vmemmap			((struct page *)VMEMMAP_BASE)
87 
88 #include <linux/sched.h>
89 
90 bool kern_addr_valid(unsigned long addr);
91 
92 /* Entries per page directory level. */
93 #define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
94 #define PTRS_PER_PMD	(1UL << PMD_BITS)
95 #define PTRS_PER_PUD	(1UL << PUD_BITS)
96 #define PTRS_PER_PGD	(1UL << PGDIR_BITS)
97 
98 #define pmd_ERROR(e)							\
99 	pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n",		\
100 	       __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
101 #define pud_ERROR(e)							\
102 	pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n",		\
103 	       __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
104 #define pgd_ERROR(e)							\
105 	pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n",		\
106 	       __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
107 
108 #endif /* !(__ASSEMBLY__) */
109 
110 /* PTE bits which are the same in SUN4U and SUN4V format.  */
111 #define _PAGE_VALID	  _AC(0x8000000000000000,UL) /* Valid TTE            */
112 #define _PAGE_R	  	  _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
113 #define _PAGE_SPECIAL     _AC(0x0200000000000000,UL) /* Special page         */
114 #define _PAGE_PMD_HUGE    _AC(0x0100000000000000,UL) /* Huge page            */
115 #define _PAGE_PUD_HUGE    _PAGE_PMD_HUGE
116 
117 /* SUN4U pte bits... */
118 #define _PAGE_SZ4MB_4U	  _AC(0x6000000000000000,UL) /* 4MB Page             */
119 #define _PAGE_SZ512K_4U	  _AC(0x4000000000000000,UL) /* 512K Page            */
120 #define _PAGE_SZ64K_4U	  _AC(0x2000000000000000,UL) /* 64K Page             */
121 #define _PAGE_SZ8K_4U	  _AC(0x0000000000000000,UL) /* 8K Page              */
122 #define _PAGE_NFO_4U	  _AC(0x1000000000000000,UL) /* No Fault Only        */
123 #define _PAGE_IE_4U	  _AC(0x0800000000000000,UL) /* Invert Endianness    */
124 #define _PAGE_SOFT2_4U	  _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
125 #define _PAGE_SPECIAL_4U  _AC(0x0200000000000000,UL) /* Special page         */
126 #define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page            */
127 #define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL) /* Reserved             */
128 #define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
129 #define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
130 #define _PAGE_SZALL_4U	  _AC(0x6001000000000000,UL) /* All pgsz bits        */
131 #define _PAGE_SN_4U	  _AC(0x0000800000000000,UL) /* (Cheetah) Snoop      */
132 #define _PAGE_RES2_4U	  _AC(0x0000780000000000,UL) /* Reserved             */
133 #define _PAGE_PADDR_4U	  _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13]  */
134 #define _PAGE_SOFT_4U	  _AC(0x0000000000001F80,UL) /* Software bits:       */
135 #define _PAGE_EXEC_4U	  _AC(0x0000000000001000,UL) /* Executable SW bit    */
136 #define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty)     */
137 #define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd)     */
138 #define _PAGE_READ_4U	  _AC(0x0000000000000200,UL) /* Readable SW Bit      */
139 #define _PAGE_WRITE_4U	  _AC(0x0000000000000100,UL) /* Writable SW Bit      */
140 #define _PAGE_PRESENT_4U  _AC(0x0000000000000080,UL) /* Present              */
141 #define _PAGE_L_4U	  _AC(0x0000000000000040,UL) /* Locked TTE           */
142 #define _PAGE_CP_4U	  _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
143 #define _PAGE_CV_4U	  _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
144 #define _PAGE_E_4U	  _AC(0x0000000000000008,UL) /* side-Effect          */
145 #define _PAGE_P_4U	  _AC(0x0000000000000004,UL) /* Privileged Page      */
146 #define _PAGE_W_4U	  _AC(0x0000000000000002,UL) /* Writable             */
147 
148 /* SUN4V pte bits... */
149 #define _PAGE_NFO_4V	  _AC(0x4000000000000000,UL) /* No Fault Only        */
150 #define _PAGE_SOFT2_4V	  _AC(0x3F00000000000000,UL) /* Software bits, set 2 */
151 #define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty)     */
152 #define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd)     */
153 #define _PAGE_READ_4V	  _AC(0x0800000000000000,UL) /* Readable SW Bit      */
154 #define _PAGE_WRITE_4V	  _AC(0x0400000000000000,UL) /* Writable SW Bit      */
155 #define _PAGE_SPECIAL_4V  _AC(0x0200000000000000,UL) /* Special page         */
156 #define _PAGE_PMD_HUGE_4V _AC(0x0100000000000000,UL) /* Huge page            */
157 #define _PAGE_PADDR_4V	  _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13]         */
158 #define _PAGE_IE_4V	  _AC(0x0000000000001000,UL) /* Invert Endianness    */
159 #define _PAGE_E_4V	  _AC(0x0000000000000800,UL) /* side-Effect          */
160 #define _PAGE_CP_4V	  _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */
161 #define _PAGE_CV_4V	  _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */
162 /* On M7, bit 9 is used to enable MCD (memory corruption detection) instead of V-cacheability */
163 #define _PAGE_MCD_4V      _AC(0x0000000000000200,UL) /* Memory Corruption    */
164 #define _PAGE_P_4V	  _AC(0x0000000000000100,UL) /* Privileged Page      */
165 #define _PAGE_EXEC_4V	  _AC(0x0000000000000080,UL) /* Executable Page      */
166 #define _PAGE_W_4V	  _AC(0x0000000000000040,UL) /* Writable             */
167 #define _PAGE_SOFT_4V	  _AC(0x0000000000000030,UL) /* Software bits        */
168 #define _PAGE_PRESENT_4V  _AC(0x0000000000000010,UL) /* Present              */
169 #define _PAGE_RESV_4V	  _AC(0x0000000000000008,UL) /* Reserved             */
170 #define _PAGE_SZ16GB_4V	  _AC(0x0000000000000007,UL) /* 16GB Page            */
171 #define _PAGE_SZ2GB_4V	  _AC(0x0000000000000006,UL) /* 2GB Page             */
172 #define _PAGE_SZ256MB_4V  _AC(0x0000000000000005,UL) /* 256MB Page           */
173 #define _PAGE_SZ32MB_4V	  _AC(0x0000000000000004,UL) /* 32MB Page            */
174 #define _PAGE_SZ4MB_4V	  _AC(0x0000000000000003,UL) /* 4MB Page             */
175 #define _PAGE_SZ512K_4V	  _AC(0x0000000000000002,UL) /* 512K Page            */
176 #define _PAGE_SZ64K_4V	  _AC(0x0000000000000001,UL) /* 64K Page             */
177 #define _PAGE_SZ8K_4V	  _AC(0x0000000000000000,UL) /* 8K Page              */
178 #define _PAGE_SZALL_4V	  _AC(0x0000000000000007,UL) /* All pgsz bits        */
179 
180 #define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
181 #define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V
182 
183 #if REAL_HPAGE_SHIFT != 22
184 #error REAL_HPAGE_SHIFT and _PAGE_SZHUGE_foo must match up
185 #endif
186 
187 #define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
188 #define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V
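/* Editorial note: REAL_HPAGE_SHIFT == 22 is the 4MB hardware page size, hence
 * the SZ4MB encodings above; the default 8MB huge page (HPAGE_SHIFT == 23,
 * tied to PMD_SHIFT by the check earlier) is assumed here to be backed by two
 * such 4MB hardware TTEs.
 */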
189 
190 #ifndef __ASSEMBLY__
191 
192 pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
193 
194 unsigned long pte_sz_bits(unsigned long size);
195 
196 extern pgprot_t PAGE_KERNEL;
197 extern pgprot_t PAGE_KERNEL_LOCKED;
198 extern pgprot_t PAGE_COPY;
199 extern pgprot_t PAGE_SHARED;
200 
201 /* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
202 extern unsigned long _PAGE_IE;
203 extern unsigned long _PAGE_E;
204 extern unsigned long _PAGE_CACHE;
205 
206 extern unsigned long pg_iobits;
207 extern unsigned long _PAGE_ALL_SZ_BITS;
208 
209 extern struct page *mem_map_zero;
210 #define ZERO_PAGE(vaddr)	(mem_map_zero)
211 
212 /* PFNs are real physical page numbers.  However, mem_map only begins to record
213  * per-page information starting at pfn_base.  This is to handle systems where
214  * the first physical page in the machine is at some huge physical address,
215  * such as 4GB.   This is common on a partitioned E10000, for example.
216  */
217 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
218 {
219 	unsigned long paddr = pfn << PAGE_SHIFT;
220 
221 	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
222 	return __pte(paddr | pgprot_val(prot));
223 }
224 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
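/* Editorial usage sketch: a kernel mapping for frame "pfn" could be built as
 * pfn_pte(pfn, PAGE_KERNEL) and installed with set_pte_at(), defined further
 * below; mk_pte() above is the struct-page flavoured wrapper.
 */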
225 
226 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
227 static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
228 {
229 	pte_t pte = pfn_pte(page_nr, pgprot);
230 
231 	return __pmd(pte_val(pte));
232 }
233 #define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
234 #endif
235 
236 /* This one can be done with two shifts.  */
237 static inline unsigned long pte_pfn(pte_t pte)
238 {
239 	unsigned long ret;
240 
241 	__asm__ __volatile__(
242 	"\n661:	sllx		%1, %2, %0\n"
243 	"	srlx		%0, %3, %0\n"
244 	"	.section	.sun4v_2insn_patch, \"ax\"\n"
245 	"	.word		661b\n"
246 	"	sllx		%1, %4, %0\n"
247 	"	srlx		%0, %5, %0\n"
248 	"	.previous\n"
249 	: "=r" (ret)
250 	: "r" (pte_val(pte)),
251 	  "i" (21), "i" (21 + PAGE_SHIFT),
252 	  "i" (8), "i" (8 + PAGE_SHIFT));
253 
254 	return ret;
255 }
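/* Editorial note on the two-shift trick above: the unpatched (sun4u) pair is
 * equivalent to
 *	pfn = (pte_val(pte) << 21) >> (21 + PAGE_SHIFT);
 * the left shift discards everything above the 43-bit _PAGE_PADDR_4U field
 * and the right shift drops the sub-page bits, leaving pa >> PAGE_SHIFT.  The
 * sun4v patch uses 8 instead of 21 for the wider 56-bit _PAGE_PADDR_4V field.
 */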
256 #define pte_page(x) pfn_to_page(pte_pfn(x))
257 
258 static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
259 {
260 	unsigned long mask, tmp;
261 
262 	/* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
263 	 * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
264 	 *
265 	 * Even if we use negation tricks the result is still a 6
266 	 * instruction sequence, so don't try to play fancy and just
267 	 * do the most straightforward implementation.
268 	 *
269 	 * Note: We encode this into 3 sun4v 2-insn patch sequences.
270 	 */
271 
272 	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
273 	__asm__ __volatile__(
274 	"\n661:	sethi		%%uhi(%2), %1\n"
275 	"	sethi		%%hi(%2), %0\n"
276 	"\n662:	or		%1, %%ulo(%2), %1\n"
277 	"	or		%0, %%lo(%2), %0\n"
278 	"\n663:	sllx		%1, 32, %1\n"
279 	"	or		%0, %1, %0\n"
280 	"	.section	.sun4v_2insn_patch, \"ax\"\n"
281 	"	.word		661b\n"
282 	"	sethi		%%uhi(%3), %1\n"
283 	"	sethi		%%hi(%3), %0\n"
284 	"	.word		662b\n"
285 	"	or		%1, %%ulo(%3), %1\n"
286 	"	or		%0, %%lo(%3), %0\n"
287 	"	.word		663b\n"
288 	"	sllx		%1, 32, %1\n"
289 	"	or		%0, %1, %0\n"
290 	"	.previous\n"
291 	"	.section	.sun_m7_2insn_patch, \"ax\"\n"
292 	"	.word		661b\n"
293 	"	sethi		%%uhi(%4), %1\n"
294 	"	sethi		%%hi(%4), %0\n"
295 	"	.word		662b\n"
296 	"	or		%1, %%ulo(%4), %1\n"
297 	"	or		%0, %%lo(%4), %0\n"
298 	"	.word		663b\n"
299 	"	sllx		%1, 32, %1\n"
300 	"	or		%0, %1, %0\n"
301 	"	.previous\n"
302 	: "=r" (mask), "=r" (tmp)
303 	: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
304 	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
305 	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
306 	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
307 	       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
308 	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V),
309 	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
310 	       _PAGE_CP_4V | _PAGE_E_4V |
311 	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));
312 
313 	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
314 }
315 
316 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
317 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
318 {
319 	pte_t pte = __pte(pmd_val(pmd));
320 
321 	pte = pte_modify(pte, newprot);
322 
323 	return __pmd(pte_val(pte));
324 }
325 #endif
326 
327 static inline pgprot_t pgprot_noncached(pgprot_t prot)
328 {
329 	unsigned long val = pgprot_val(prot);
330 
331 	__asm__ __volatile__(
332 	"\n661:	andn		%0, %2, %0\n"
333 	"	or		%0, %3, %0\n"
334 	"	.section	.sun4v_2insn_patch, \"ax\"\n"
335 	"	.word		661b\n"
336 	"	andn		%0, %4, %0\n"
337 	"	or		%0, %5, %0\n"
338 	"	.previous\n"
339 	"	.section	.sun_m7_2insn_patch, \"ax\"\n"
340 	"	.word		661b\n"
341 	"	andn		%0, %6, %0\n"
342 	"	or		%0, %5, %0\n"
343 	"	.previous\n"
344 	: "=r" (val)
345 	: "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
346 	             "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V),
347 	             "i" (_PAGE_CP_4V));
348 
349 	return __pgprot(val);
350 }
351 /* Various pieces of code check for platform support by ifdef testing
352  * on "pgprot_noncached".  That's broken and should be fixed, but for
353  * now...
354  */
355 #define pgprot_noncached pgprot_noncached
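/* Editorial sketch of what the patched sequences above compute, ignoring the
 * runtime patching mechanics:
 *	sun4u: val = (val & ~(_PAGE_CP_4U | _PAGE_CV_4U)) | _PAGE_E_4U;
 *	sun4v: val = (val & ~(_PAGE_CP_4V | _PAGE_CV_4V)) | _PAGE_E_4V;
 *	M7:    val = (val & ~_PAGE_CP_4V) | _PAGE_E_4V;
 * i.e. clear the cacheability bits and set the side-effect bit; M7 leaves
 * bit 9 alone because it is repurposed there for MCD.
 */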
356 
357 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
358 pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
359 #define arch_make_huge_pte arch_make_huge_pte
360 static inline unsigned long __pte_default_huge_mask(void)
361 {
362 	unsigned long mask;
363 
364 	__asm__ __volatile__(
365 	"\n661:	sethi		%%uhi(%1), %0\n"
366 	"	sllx		%0, 32, %0\n"
367 	"	.section	.sun4v_2insn_patch, \"ax\"\n"
368 	"	.word		661b\n"
369 	"	mov		%2, %0\n"
370 	"	nop\n"
371 	"	.previous\n"
372 	: "=r" (mask)
373 	: "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
374 
375 	return mask;
376 }
377 
378 static inline pte_t pte_mkhuge(pte_t pte)
379 {
380 	return __pte(pte_val(pte) | __pte_default_huge_mask());
381 }
382 
383 static inline bool is_default_hugetlb_pte(pte_t pte)
384 {
385 	unsigned long mask = __pte_default_huge_mask();
386 
387 	return (pte_val(pte) & mask) == mask;
388 }
389 
390 static inline bool is_hugetlb_pmd(pmd_t pmd)
391 {
392 	return !!(pmd_val(pmd) & _PAGE_PMD_HUGE);
393 }
394 
395 static inline bool is_hugetlb_pud(pud_t pud)
396 {
397 	return !!(pud_val(pud) & _PAGE_PUD_HUGE);
398 }
399 
400 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
401 static inline pmd_t pmd_mkhuge(pmd_t pmd)
402 {
403 	pte_t pte = __pte(pmd_val(pmd));
404 
405 	pte = pte_mkhuge(pte);
406 	pte_val(pte) |= _PAGE_PMD_HUGE;
407 
408 	return __pmd(pte_val(pte));
409 }
410 #endif
411 #else
412 static inline bool is_hugetlb_pte(pte_t pte)
413 {
414 	return false;
415 }
416 #endif
417 
418 static inline pte_t pte_mkdirty(pte_t pte)
419 {
420 	unsigned long val = pte_val(pte), tmp;
421 
422 	__asm__ __volatile__(
423 	"\n661:	or		%0, %3, %0\n"
424 	"	nop\n"
425 	"\n662:	nop\n"
426 	"	nop\n"
427 	"	.section	.sun4v_2insn_patch, \"ax\"\n"
428 	"	.word		661b\n"
429 	"	sethi		%%uhi(%4), %1\n"
430 	"	sllx		%1, 32, %1\n"
431 	"	.word		662b\n"
432 	"	or		%1, %%lo(%4), %1\n"
433 	"	or		%0, %1, %0\n"
434 	"	.previous\n"
435 	: "=r" (val), "=r" (tmp)
436 	: "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
437 	  "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
438 
439 	return __pte(val);
440 }
441 
442 static inline pte_t pte_mkclean(pte_t pte)
443 {
444 	unsigned long val = pte_val(pte), tmp;
445 
446 	__asm__ __volatile__(
447 	"\n661:	andn		%0, %3, %0\n"
448 	"	nop\n"
449 	"\n662:	nop\n"
450 	"	nop\n"
451 	"	.section	.sun4v_2insn_patch, \"ax\"\n"
452 	"	.word		661b\n"
453 	"	sethi		%%uhi(%4), %1\n"
454 	"	sllx		%1, 32, %1\n"
455 	"	.word		662b\n"
456 	"	or		%1, %%lo(%4), %1\n"
457 	"	andn		%0, %1, %0\n"
458 	"	.previous\n"
459 	: "=r" (val), "=r" (tmp)
460 	: "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
461 	  "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
462 
463 	return __pte(val);
464 }
465 
466 static inline pte_t pte_mkwrite(pte_t pte)
467 {
468 	unsigned long val = pte_val(pte), mask;
469 
470 	__asm__ __volatile__(
471 	"\n661:	mov		%1, %0\n"
472 	"	nop\n"
473 	"	.section	.sun4v_2insn_patch, \"ax\"\n"
474 	"	.word		661b\n"
475 	"	sethi		%%uhi(%2), %0\n"
476 	"	sllx		%0, 32, %0\n"
477 	"	.previous\n"
478 	: "=r" (mask)
479 	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
480 
481 	return __pte(val | mask);
482 }
483 
484 static inline pte_t pte_wrprotect(pte_t pte)
485 {
486 	unsigned long val = pte_val(pte), tmp;
487 
488 	__asm__ __volatile__(
489 	"\n661:	andn		%0, %3, %0\n"
490 	"	nop\n"
491 	"\n662:	nop\n"
492 	"	nop\n"
493 	"	.section	.sun4v_2insn_patch, \"ax\"\n"
494 	"	.word		661b\n"
495 	"	sethi		%%uhi(%4), %1\n"
496 	"	sllx		%1, 32, %1\n"
497 	"	.word		662b\n"
498 	"	or		%1, %%lo(%4), %1\n"
499 	"	andn		%0, %1, %0\n"
500 	"	.previous\n"
501 	: "=r" (val), "=r" (tmp)
502 	: "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
503 	  "i" (_PAGE_WRITE_4V | _PAGE_W_4V));
504 
505 	return __pte(val);
506 }
507 
508 static inline pte_t pte_mkold(pte_t pte)
509 {
510 	unsigned long mask;
511 
512 	__asm__ __volatile__(
513 	"\n661:	mov		%1, %0\n"
514 	"	nop\n"
515 	"	.section	.sun4v_2insn_patch, \"ax\"\n"
516 	"	.word		661b\n"
517 	"	sethi		%%uhi(%2), %0\n"
518 	"	sllx		%0, 32, %0\n"
519 	"	.previous\n"
520 	: "=r" (mask)
521 	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
522 
523 	mask |= _PAGE_R;
524 
525 	return __pte(pte_val(pte) & ~mask);
526 }
527 
528 static inline pte_t pte_mkyoung(pte_t pte)
529 {
530 	unsigned long mask;
531 
532 	__asm__ __volatile__(
533 	"\n661:	mov		%1, %0\n"
534 	"	nop\n"
535 	"	.section	.sun4v_2insn_patch, \"ax\"\n"
536 	"	.word		661b\n"
537 	"	sethi		%%uhi(%2), %0\n"
538 	"	sllx		%0, 32, %0\n"
539 	"	.previous\n"
540 	: "=r" (mask)
541 	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
542 
543 	mask |= _PAGE_R;
544 
545 	return __pte(pte_val(pte) | mask);
546 }
547 
548 static inline pte_t pte_mkspecial(pte_t pte)
549 {
550 	pte_val(pte) |= _PAGE_SPECIAL;
551 	return pte;
552 }
553 
554 static inline pte_t pte_mkmcd(pte_t pte)
555 {
556 	pte_val(pte) |= _PAGE_MCD_4V;
557 	return pte;
558 }
559 
560 static inline pte_t pte_mknotmcd(pte_t pte)
561 {
562 	pte_val(pte) &= ~_PAGE_MCD_4V;
563 	return pte;
564 }
565 
566 static inline unsigned long pte_young(pte_t pte)
567 {
568 	unsigned long mask;
569 
570 	__asm__ __volatile__(
571 	"\n661:	mov		%1, %0\n"
572 	"	nop\n"
573 	"	.section	.sun4v_2insn_patch, \"ax\"\n"
574 	"	.word		661b\n"
575 	"	sethi		%%uhi(%2), %0\n"
576 	"	sllx		%0, 32, %0\n"
577 	"	.previous\n"
578 	: "=r" (mask)
579 	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
580 
581 	return (pte_val(pte) & mask);
582 }
583 
584 static inline unsigned long pte_dirty(pte_t pte)
585 {
586 	unsigned long mask;
587 
588 	__asm__ __volatile__(
589 	"\n661:	mov		%1, %0\n"
590 	"	nop\n"
591 	"	.section	.sun4v_2insn_patch, \"ax\"\n"
592 	"	.word		661b\n"
593 	"	sethi		%%uhi(%2), %0\n"
594 	"	sllx		%0, 32, %0\n"
595 	"	.previous\n"
596 	: "=r" (mask)
597 	: "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));
598 
599 	return (pte_val(pte) & mask);
600 }
601 
602 static inline unsigned long pte_write(pte_t pte)
603 {
604 	unsigned long mask;
605 
606 	__asm__ __volatile__(
607 	"\n661:	mov		%1, %0\n"
608 	"	nop\n"
609 	"	.section	.sun4v_2insn_patch, \"ax\"\n"
610 	"	.word		661b\n"
611 	"	sethi		%%uhi(%2), %0\n"
612 	"	sllx		%0, 32, %0\n"
613 	"	.previous\n"
614 	: "=r" (mask)
615 	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
616 
617 	return (pte_val(pte) & mask);
618 }
619 
620 static inline unsigned long pte_exec(pte_t pte)
621 {
622 	unsigned long mask;
623 
624 	__asm__ __volatile__(
625 	"\n661:	sethi		%%hi(%1), %0\n"
626 	"	.section	.sun4v_1insn_patch, \"ax\"\n"
627 	"	.word		661b\n"
628 	"	mov		%2, %0\n"
629 	"	.previous\n"
630 	: "=r" (mask)
631 	: "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));
632 
633 	return (pte_val(pte) & mask);
634 }
635 
636 static inline unsigned long pte_present(pte_t pte)
637 {
638 	unsigned long val = pte_val(pte);
639 
640 	__asm__ __volatile__(
641 	"\n661:	and		%0, %2, %0\n"
642 	"	.section	.sun4v_1insn_patch, \"ax\"\n"
643 	"	.word		661b\n"
644 	"	and		%0, %3, %0\n"
645 	"	.previous\n"
646 	: "=r" (val)
647 	: "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));
648 
649 	return val;
650 }
651 
652 #define pte_accessible pte_accessible
653 static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
654 {
655 	return pte_val(a) & _PAGE_VALID;
656 }
657 
658 static inline unsigned long pte_special(pte_t pte)
659 {
660 	return pte_val(pte) & _PAGE_SPECIAL;
661 }
662 
663 #define pmd_leaf	pmd_large
664 static inline unsigned long pmd_large(pmd_t pmd)
665 {
666 	pte_t pte = __pte(pmd_val(pmd));
667 
668 	return pte_val(pte) & _PAGE_PMD_HUGE;
669 }
670 
671 static inline unsigned long pmd_pfn(pmd_t pmd)
672 {
673 	pte_t pte = __pte(pmd_val(pmd));
674 
675 	return pte_pfn(pte);
676 }
677 
678 #define pmd_write pmd_write
679 static inline unsigned long pmd_write(pmd_t pmd)
680 {
681 	pte_t pte = __pte(pmd_val(pmd));
682 
683 	return pte_write(pte);
684 }
685 
686 #define pud_write(pud)	pte_write(__pte(pud_val(pud)))
687 
688 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
689 static inline unsigned long pmd_dirty(pmd_t pmd)
690 {
691 	pte_t pte = __pte(pmd_val(pmd));
692 
693 	return pte_dirty(pte);
694 }
695 
696 static inline unsigned long pmd_young(pmd_t pmd)
697 {
698 	pte_t pte = __pte(pmd_val(pmd));
699 
700 	return pte_young(pte);
701 }
702 
703 static inline unsigned long pmd_trans_huge(pmd_t pmd)
704 {
705 	pte_t pte = __pte(pmd_val(pmd));
706 
707 	return pte_val(pte) & _PAGE_PMD_HUGE;
708 }
709 
710 static inline pmd_t pmd_mkold(pmd_t pmd)
711 {
712 	pte_t pte = __pte(pmd_val(pmd));
713 
714 	pte = pte_mkold(pte);
715 
716 	return __pmd(pte_val(pte));
717 }
718 
719 static inline pmd_t pmd_wrprotect(pmd_t pmd)
720 {
721 	pte_t pte = __pte(pmd_val(pmd));
722 
723 	pte = pte_wrprotect(pte);
724 
725 	return __pmd(pte_val(pte));
726 }
727 
728 static inline pmd_t pmd_mkdirty(pmd_t pmd)
729 {
730 	pte_t pte = __pte(pmd_val(pmd));
731 
732 	pte = pte_mkdirty(pte);
733 
734 	return __pmd(pte_val(pte));
735 }
736 
737 static inline pmd_t pmd_mkclean(pmd_t pmd)
738 {
739 	pte_t pte = __pte(pmd_val(pmd));
740 
741 	pte = pte_mkclean(pte);
742 
743 	return __pmd(pte_val(pte));
744 }
745 
746 static inline pmd_t pmd_mkyoung(pmd_t pmd)
747 {
748 	pte_t pte = __pte(pmd_val(pmd));
749 
750 	pte = pte_mkyoung(pte);
751 
752 	return __pmd(pte_val(pte));
753 }
754 
755 static inline pmd_t pmd_mkwrite(pmd_t pmd)
756 {
757 	pte_t pte = __pte(pmd_val(pmd));
758 
759 	pte = pte_mkwrite(pte);
760 
761 	return __pmd(pte_val(pte));
762 }
763 
764 static inline pgprot_t pmd_pgprot(pmd_t entry)
765 {
766 	unsigned long val = pmd_val(entry);
767 
768 	return __pgprot(val);
769 }
770 #endif
771 
772 static inline int pmd_present(pmd_t pmd)
773 {
774 	return pmd_val(pmd) != 0UL;
775 }
776 
777 #define pmd_none(pmd)			(!pmd_val(pmd))
778 
779 /* pmd_bad() is only called on non-trans-huge PMDs.  Our encoding is
780  * very simple, it's just the physical address.  PTE tables are of
781  * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
782  * the top bits outside of the range of any physical address size we
783  * support are clear as well.  We also validate the physical address itself.
784  */
785 #define pmd_bad(pmd)			(pmd_val(pmd) & ~PAGE_MASK)
786 
787 #define pud_none(pud)			(!pud_val(pud))
788 
789 #define pud_bad(pud)			(pud_val(pud) & ~PAGE_MASK)
790 
791 #define p4d_none(p4d)			(!p4d_val(p4d))
792 
793 #define p4d_bad(p4d)			(p4d_val(p4d) & ~PAGE_MASK)
794 
795 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
796 void set_pmd_at(struct mm_struct *mm, unsigned long addr,
797 		pmd_t *pmdp, pmd_t pmd);
798 #else
799 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
800 			      pmd_t *pmdp, pmd_t pmd)
801 {
802 	*pmdp = pmd;
803 }
804 #endif
805 
806 static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
807 {
808 	unsigned long val = __pa((unsigned long) (ptep));
809 
810 	pmd_val(*pmdp) = val;
811 }
812 
813 #define pud_set(pudp, pmdp)	\
814 	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
815 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
816 {
817 	pte_t pte = __pte(pmd_val(pmd));
818 	unsigned long pfn;
819 
820 	pfn = pte_pfn(pte);
821 
822 	return ((unsigned long) __va(pfn << PAGE_SHIFT));
823 }
824 
825 static inline pmd_t *pud_pgtable(pud_t pud)
826 {
827 	pte_t pte = __pte(pud_val(pud));
828 	unsigned long pfn;
829 
830 	pfn = pte_pfn(pte);
831 
832 	return ((pmd_t *) __va(pfn << PAGE_SHIFT));
833 }
834 
835 #define pmd_page(pmd) 			virt_to_page((void *)pmd_page_vaddr(pmd))
836 #define pud_page(pud)			virt_to_page((void *)pud_pgtable(pud))
837 #define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
838 #define pud_present(pud)		(pud_val(pud) != 0U)
839 #define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)
840 #define p4d_pgtable(p4d)		\
841 	((pud_t *) __va(p4d_val(p4d)))
842 #define p4d_present(p4d)		(p4d_val(p4d) != 0U)
843 #define p4d_clear(p4dp)			(p4d_val(*(p4dp)) = 0UL)
844 
845 /* only used by the stubbed out hugetlb gup code, should never be called */
846 #define p4d_page(p4d)			NULL
847 
848 #define pud_leaf	pud_large
849 static inline unsigned long pud_large(pud_t pud)
850 {
851 	pte_t pte = __pte(pud_val(pud));
852 
853 	return pte_val(pte) & _PAGE_PMD_HUGE;
854 }
855 
856 static inline unsigned long pud_pfn(pud_t pud)
857 {
858 	pte_t pte = __pte(pud_val(pud));
859 
860 	return pte_pfn(pte);
861 }
862 
863 /* Same in both SUN4V and SUN4U.  */
864 #define pte_none(pte) 			(!pte_val(pte))
865 
866 #define p4d_set(p4dp, pudp)	\
867 	(p4d_val(*(p4dp)) = (__pa((unsigned long) (pudp))))
868 
869 /* We cannot include <linux/mm_types.h> at this point yet: */
870 extern struct mm_struct init_mm;
871 
872 /* Actual page table PTE updates.  */
873 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
874 		   pte_t *ptep, pte_t orig, int fullmm,
875 		   unsigned int hugepage_shift);
876 
876 static inline void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
878 				pte_t *ptep, pte_t orig, int fullmm,
879 				unsigned int hugepage_shift)
880 {
881 	/* It is more efficient to let flush_tlb_kernel_range()
882 	 * handle init_mm tlb flushes.
883 	 *
884 	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
885 	 *             and SUN4V pte layout, so this inline test is fine.
886 	 */
887 	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
888 		tlb_batch_add(mm, vaddr, ptep, orig, fullmm, hugepage_shift);
889 }
890 
891 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
892 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
893 					    unsigned long addr,
894 					    pmd_t *pmdp)
895 {
896 	pmd_t pmd = *pmdp;
897 	set_pmd_at(mm, addr, pmdp, __pmd(0UL));
898 	return pmd;
899 }
900 
901 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
902 			     pte_t *ptep, pte_t pte, int fullmm)
903 {
904 	pte_t orig = *ptep;
905 
906 	*ptep = pte;
907 	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT);
908 }
909 
910 #define set_pte_at(mm,addr,ptep,pte)	\
911 	__set_pte_at((mm), (addr), (ptep), (pte), 0)
912 
913 #define pte_clear(mm,addr,ptep)		\
914 	set_pte_at((mm), (addr), (ptep), __pte(0UL))
915 
916 #define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
917 #define pte_clear_not_present_full(mm,addr,ptep,fullmm)	\
918 	__set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm))
919 
920 #ifdef DCACHE_ALIASING_POSSIBLE
921 #define __HAVE_ARCH_MOVE_PTE
922 #define move_pte(pte, prot, old_addr, new_addr)				\
923 ({									\
924 	pte_t newpte = (pte);						\
925 	if (tlb_type != hypervisor && pte_present(pte)) {		\
926 		unsigned long this_pfn = pte_pfn(pte);			\
927 									\
928 		if (pfn_valid(this_pfn) &&				\
929 		    (((old_addr) ^ (new_addr)) & (1 << 13)))		\
930 			flush_dcache_page_all(current->mm,		\
931 					      pfn_to_page(this_pfn));	\
932 	}								\
933 	newpte;								\
934 })
935 #endif
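/* Editorial note on the move_pte() test above: with 8K pages, bit 13 is the
 * D-cache alias bit on the affected (non-hypervisor) cpus, so a page whose
 * user mapping moves between addresses that differ in that bit gets its
 * D-cache lines flushed to keep the two alias colours coherent.
 */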
936 
937 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
938 
939 void paging_init(void);
940 unsigned long find_ecache_flush_span(unsigned long size);
941 
942 struct seq_file;
943 void mmu_info(struct seq_file *);
944 
945 struct vm_area_struct;
946 void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
947 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
948 void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
949 			  pmd_t *pmd);
950 
951 #define __HAVE_ARCH_PMDP_INVALIDATE
952 extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
953 			    pmd_t *pmdp);
954 
955 #define __HAVE_ARCH_PGTABLE_DEPOSIT
956 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
957 				pgtable_t pgtable);
958 
959 #define __HAVE_ARCH_PGTABLE_WITHDRAW
960 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
961 #endif
962 
963 /* Encode and de-code a swap entry */
964 #define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0xffUL)
965 #define __swp_offset(entry)	((entry).val >> (PAGE_SHIFT + 8UL))
966 #define __swp_entry(type, offset)	\
967 	( (swp_entry_t) \
968 	  { \
969 		(((long)(type) << PAGE_SHIFT) | \
970                  ((long)(offset) << (PAGE_SHIFT + 8UL))) \
971 	  } )
972 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
973 #define __swp_entry_to_pte(x)		((pte_t) { (x).val })
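/* Editorial sketch of the swap entry layout implied above, assuming
 * PAGE_SHIFT == 13: bits [12:0] stay clear, bits [20:13] carry the 8-bit
 * swap type and bits [63:21] carry the offset, so the present bits (which
 * live below bit 13 in both the sun4u and sun4v layouts) are never set in
 * a swap PTE.
 */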
974 
975 int page_in_phys_avail(unsigned long paddr);
976 
977 /*
978  * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
979  * its high 4 bits.  These macros/functions put it there or get it from there.
980  */
981 #define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
982 #define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
983 #define GET_PFN(pfn)			(pfn & 0x0fffffffffffffffUL)
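/* Editorial example: with BITS_PER_LONG == 64, MK_IOSPACE_PFN(2UL, 0x1000UL)
 * produces (2UL << 60) | 0x1000; GET_IOSPACE() shifts the top four bits back
 * down to recover 2 and GET_PFN() masks them off to recover 0x1000.
 */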
984 
985 int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
986 		    unsigned long, pgprot_t);
987 
988 void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
989 		      unsigned long addr, pte_t pte);
990 
991 int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
992 		  unsigned long addr, pte_t oldpte);
993 
994 #define __HAVE_ARCH_DO_SWAP_PAGE
995 static inline void arch_do_swap_page(struct mm_struct *mm,
996 				     struct vm_area_struct *vma,
997 				     unsigned long addr,
998 				     pte_t pte, pte_t oldpte)
999 {
1000 	/* If this is a new page being mapped in, there can be no
1001 	 * ADI tags stored away for this page. Skip looking for
1002 	 * stored tags.
1003 	 */
1004 	if (pte_none(oldpte))
1005 		return;
1006 
1007 	if (adi_state.enabled && (pte_val(pte) & _PAGE_MCD_4V))
1008 		adi_restore_tags(mm, vma, addr, pte);
1009 }
1010 
1011 #define __HAVE_ARCH_UNMAP_ONE
1012 static inline int arch_unmap_one(struct mm_struct *mm,
1013 				 struct vm_area_struct *vma,
1014 				 unsigned long addr, pte_t oldpte)
1015 {
1016 	if (adi_state.enabled && (pte_val(oldpte) & _PAGE_MCD_4V))
1017 		return adi_save_tags(mm, vma, addr, oldpte);
1018 	return 0;
1019 }
1020 
1021 static inline int io_remap_pfn_range(struct vm_area_struct *vma,
1022 				     unsigned long from, unsigned long pfn,
1023 				     unsigned long size, pgprot_t prot)
1024 {
1025 	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
1026 	int space = GET_IOSPACE(pfn);
1027 	unsigned long phys_base;
1028 
1029 	phys_base = offset | (((unsigned long) space) << 32UL);
1030 
1031 	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
1032 }
1033 #define io_remap_pfn_range io_remap_pfn_range
1034 
1035 static inline unsigned long __untagged_addr(unsigned long start)
1036 {
1037 	if (adi_capable()) {
1038 		long addr = start;
1039 
1040 	/* If userspace has passed a versioned address, the kernel
1041 	 * will not find it in the VMAs since it does not store
1042 	 * the version tags in the list of VMAs. Storing version
1043 	 * tags in the list of VMAs is impractical since they can be
1044 	 * changed at any time from userspace without dropping into
1045 	 * the kernel. Any address search in VMAs will be done with
1046 		 * non-versioned addresses. Ensure the ADI version bits
1047 		 * are dropped here by sign extending the last bit before
1048 		 * ADI bits. IOMMU does not implement version tags.
1049 		 */
1050 		return (addr << (long)adi_nbits()) >> (long)adi_nbits();
1051 	}
1052 
1053 	return start;
1054 }
1055 #define untagged_addr(addr) \
1056 	((__typeof__(addr))(__untagged_addr((unsigned long)(addr))))
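/* Editorial sketch: adi_nbits() is assumed here to be the width of the ADI
 * version tag held in the top address bits (4 on M7), so the signed
 * left/right shift pair above sign-extends the bit just below the tag over
 * bits 63..60 and discards whatever version an application had stored there.
 */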
1057 
1058 static inline bool pte_access_permitted(pte_t pte, bool write)
1059 {
1060 	u64 prot;
1061 
1062 	if (tlb_type == hypervisor) {
1063 		prot = _PAGE_PRESENT_4V | _PAGE_P_4V;
1064 		if (write)
1065 			prot |= _PAGE_WRITE_4V;
1066 	} else {
1067 		prot = _PAGE_PRESENT_4U | _PAGE_P_4U;
1068 		if (write)
1069 			prot |= _PAGE_WRITE_4U;
1070 	}
1071 
1072 	return (pte_val(pte) & (prot | _PAGE_SPECIAL)) == prot;
1073 }
1074 #define pte_access_permitted pte_access_permitted
1075 
1076 #include <asm/tlbflush.h>
1077 
1078 /* We provide our own get_unmapped_area to cope with VA holes and
1079  * SHM area cache aliasing for userland.
1080  */
1081 #define HAVE_ARCH_UNMAPPED_AREA
1082 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1083 
1084 /* We provide a special get_unmapped_area for framebuffer mmaps to try and use
1085  * the largest alignment possible such that larger PTEs can be used.
1086  */
1087 unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
1088 				   unsigned long, unsigned long,
1089 				   unsigned long);
1090 #define HAVE_ARCH_FB_UNMAPPED_AREA
1091 
1092 void sun4v_register_fault_status(void);
1093 void sun4v_ktsb_register(void);
1094 void __init cheetah_ecache_flush_init(void);
1095 void sun4v_patch_tlb_handlers(void);
1096 
1097 extern unsigned long cmdline_memory_size;
1098 
1099 asmlinkage void do_sparc64_fault(struct pt_regs *regs);
1100 
1101 #define pmd_pgtable(PMD)	((pte_t *)pmd_page_vaddr(PMD))
1102 
1103 #ifdef CONFIG_HUGETLB_PAGE
1104 
1105 #define pud_leaf_size pud_leaf_size
1106 extern unsigned long pud_leaf_size(pud_t pud);
1107 
1108 #define pmd_leaf_size pmd_leaf_size
1109 extern unsigned long pmd_leaf_size(pmd_t pmd);
1110 
1111 #define pte_leaf_size pte_leaf_size
1112 extern unsigned long pte_leaf_size(pte_t pte);
1113 
1114 #endif /* CONFIG_HUGETLB_PAGE */
1115 
1116 #endif /* !(__ASSEMBLY__) */
1117 
1118 #endif /* !(_SPARC64_PGTABLE_H) */
1119