/* SPDX-License-Identifier: GPL-2.0 */
/*
 * pgtable.h: SpitFire page table operations.
 *
 * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#ifndef _SPARC64_PGTABLE_H
#define _SPARC64_PGTABLE_H

/* This file contains the functions and defines necessary to modify and use
 * the SpitFire page tables.
 */

#include <asm-generic/pgtable-nop4d.h>
#include <linux/compiler.h>
#include <linux/const.h>
#include <asm/types.h>
#include <asm/spitfire.h>
#include <asm/asi.h>
#include <asm/adi.h>
#include <asm/page.h>
#include <asm/processor.h>

/* The kernel image occupies 0x400000 to 0x6000000 (4MB --> 96MB).
 * The page copy blockops can use 0x6000000 to 0x8000000.
 * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
 * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
 * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
 * The vmalloc area spans 0x100000000 to 0x200000000.
 * Since modules need to be in the lowest 32-bits of the address space,
 * we place them right before the OBP area from 0x10000000 to 0xf0000000.
 * There is a single static kernel PMD table which maps from 0x0 to
 * address 0x400000000.
 */
#define	TLBTEMP_BASE		_AC(0x0000000006000000,UL)
#define	TSBMAP_8K_BASE		_AC(0x0000000008000000,UL)
#define	TSBMAP_4M_BASE		_AC(0x0000000008400000,UL)
#define MODULES_VADDR		_AC(0x0000000010000000,UL)
#define MODULES_LEN		_AC(0x00000000e0000000,UL)
#define MODULES_END		_AC(0x00000000f0000000,UL)
#define LOW_OBP_ADDRESS		_AC(0x00000000f0000000,UL)
#define HI_OBP_ADDRESS		_AC(0x0000000100000000,UL)
#define VMALLOC_START		_AC(0x0000000100000000,UL)
#define VMEMMAP_BASE		VMALLOC_END

/* PMD_SHIFT determines the size of the area a second-level page
 * table can map
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PMD_BITS	(PAGE_SHIFT - 3)

/* PUD_SHIFT determines the size of the area a third-level page
 * table can map
 */
#define PUD_SHIFT	(PMD_SHIFT + PMD_BITS)
#define PUD_SIZE	(_AC(1,UL) << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PUD_BITS	(PAGE_SHIFT - 3)

/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
#define PGDIR_SHIFT	(PUD_SHIFT + PUD_BITS)
#define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PGDIR_BITS	(PAGE_SHIFT - 3)
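
/* Worked example (with sparc64's 8K base pages, PAGE_SHIFT == 13):
 *
 *   PMD_SHIFT   = 13 + 10 = 23  ->  PMD_SIZE   = 8MB,  PTRS_PER_PMD = 1024
 *   PUD_SHIFT   = 23 + 10 = 33  ->  PUD_SIZE   = 8GB,  PTRS_PER_PUD = 1024
 *   PGDIR_SHIFT = 33 + 10 = 43  ->  PGDIR_SIZE = 8TB,  PTRS_PER_PGD = 1024
 *
 * PGDIR_SHIFT + PGDIR_BITS = 53, i.e. the four levels together cover a
 * 53-bit virtual address space, which the sanity check below enforces.
 */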

#if (MAX_PHYS_ADDRESS_BITS > PGDIR_SHIFT + PGDIR_BITS)
#error MAX_PHYS_ADDRESS_BITS exceeds what kernel page tables can support
#endif

#if (PGDIR_SHIFT + PGDIR_BITS) != 53
#error Page table parameters do not cover virtual address space properly.
#endif

#if (PMD_SHIFT != HPAGE_SHIFT)
#error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
#endif

#ifndef __ASSEMBLY__

extern unsigned long VMALLOC_END;

#define vmemmap			((struct page *)VMEMMAP_BASE)

#include <linux/sched.h>
#include <asm/tlbflush.h>

bool kern_addr_valid(unsigned long addr);

/* Entries per page directory level. */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << PMD_BITS)
#define PTRS_PER_PUD	(1UL << PUD_BITS)
#define PTRS_PER_PGD	(1UL << PGDIR_BITS)

#define pmd_ERROR(e)							\
	pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
#define pud_ERROR(e)							\
	pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
#define pgd_ERROR(e)							\
	pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))

#endif /* !(__ASSEMBLY__) */

/* PTE bits which are the same in SUN4U and SUN4V format.  */
#define _PAGE_VALID	  _AC(0x8000000000000000,UL) /* Valid TTE            */
#define _PAGE_R	  	  _AC(0x8000000000000000,UL) /* Keep ref bit up to date */
#define _PAGE_SPECIAL     _AC(0x0200000000000000,UL) /* Special page         */
#define _PAGE_PMD_HUGE    _AC(0x0100000000000000,UL) /* Huge page            */
#define _PAGE_PUD_HUGE    _PAGE_PMD_HUGE

/* SUN4U pte bits... */
#define _PAGE_SZ4MB_4U	  _AC(0x6000000000000000,UL) /* 4MB Page             */
#define _PAGE_SZ512K_4U	  _AC(0x4000000000000000,UL) /* 512K Page            */
#define _PAGE_SZ64K_4U	  _AC(0x2000000000000000,UL) /* 64K Page             */
#define _PAGE_SZ8K_4U	  _AC(0x0000000000000000,UL) /* 8K Page              */
#define _PAGE_NFO_4U	  _AC(0x1000000000000000,UL) /* No Fault Only        */
#define _PAGE_IE_4U	  _AC(0x0800000000000000,UL) /* Invert Endianness    */
#define _PAGE_SOFT2_4U	  _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
#define _PAGE_SPECIAL_4U  _AC(0x0200000000000000,UL) /* Special page         */
#define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page            */
#define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL) /* Reserved             */
#define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
#define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
#define _PAGE_SZALL_4U	  _AC(0x6001000000000000,UL) /* All pgsz bits        */
#define _PAGE_SN_4U	  _AC(0x0000800000000000,UL) /* (Cheetah) Snoop      */
#define _PAGE_RES2_4U	  _AC(0x0000780000000000,UL) /* Reserved             */
#define _PAGE_PADDR_4U	  _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13]  */
#define _PAGE_SOFT_4U	  _AC(0x0000000000001F80,UL) /* Software bits:       */
#define _PAGE_EXEC_4U	  _AC(0x0000000000001000,UL) /* Executable SW bit    */
#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty)     */
#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd)     */
#define _PAGE_READ_4U	  _AC(0x0000000000000200,UL) /* Readable SW Bit      */
#define _PAGE_WRITE_4U	  _AC(0x0000000000000100,UL) /* Writable SW Bit      */
#define _PAGE_PRESENT_4U  _AC(0x0000000000000080,UL) /* Present              */
#define _PAGE_L_4U	  _AC(0x0000000000000040,UL) /* Locked TTE           */
#define _PAGE_CP_4U	  _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
#define _PAGE_CV_4U	  _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
#define _PAGE_E_4U	  _AC(0x0000000000000008,UL) /* side-Effect          */
#define _PAGE_P_4U	  _AC(0x0000000000000004,UL) /* Privileged Page      */
#define _PAGE_W_4U	  _AC(0x0000000000000002,UL) /* Writable             */

/* SUN4V pte bits... */
#define _PAGE_NFO_4V	  _AC(0x4000000000000000,UL) /* No Fault Only        */
#define _PAGE_SOFT2_4V	  _AC(0x3F00000000000000,UL) /* Software bits, set 2 */
#define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty)     */
#define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd)     */
#define _PAGE_READ_4V	  _AC(0x0800000000000000,UL) /* Readable SW Bit      */
#define _PAGE_WRITE_4V	  _AC(0x0400000000000000,UL) /* Writable SW Bit      */
#define _PAGE_SPECIAL_4V  _AC(0x0200000000000000,UL) /* Special page         */
#define _PAGE_PMD_HUGE_4V _AC(0x0100000000000000,UL) /* Huge page            */
#define _PAGE_PADDR_4V	  _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13]         */
#define _PAGE_IE_4V	  _AC(0x0000000000001000,UL) /* Invert Endianness    */
#define _PAGE_E_4V	  _AC(0x0000000000000800,UL) /* side-Effect          */
#define _PAGE_CP_4V	  _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */
#define _PAGE_CV_4V	  _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */
/* On M7, bit 9 enables MCD corruption detection instead of V-cache cacheability */
#define _PAGE_MCD_4V      _AC(0x0000000000000200,UL) /* Memory Corruption    */
#define _PAGE_P_4V	  _AC(0x0000000000000100,UL) /* Privileged Page      */
#define _PAGE_EXEC_4V	  _AC(0x0000000000000080,UL) /* Executable Page      */
#define _PAGE_W_4V	  _AC(0x0000000000000040,UL) /* Writable             */
#define _PAGE_SOFT_4V	  _AC(0x0000000000000030,UL) /* Software bits        */
#define _PAGE_PRESENT_4V  _AC(0x0000000000000010,UL) /* Present              */
#define _PAGE_RESV_4V	  _AC(0x0000000000000008,UL) /* Reserved             */
#define _PAGE_SZ16GB_4V	  _AC(0x0000000000000007,UL) /* 16GB Page            */
#define _PAGE_SZ2GB_4V	  _AC(0x0000000000000006,UL) /* 2GB Page             */
#define _PAGE_SZ256MB_4V  _AC(0x0000000000000005,UL) /* 256MB Page           */
#define _PAGE_SZ32MB_4V	  _AC(0x0000000000000004,UL) /* 32MB Page            */
#define _PAGE_SZ4MB_4V	  _AC(0x0000000000000003,UL) /* 4MB Page             */
#define _PAGE_SZ512K_4V	  _AC(0x0000000000000002,UL) /* 512K Page            */
#define _PAGE_SZ64K_4V	  _AC(0x0000000000000001,UL) /* 64K Page             */
#define _PAGE_SZ8K_4V	  _AC(0x0000000000000000,UL) /* 8K Page              */
#define _PAGE_SZALL_4V	  _AC(0x0000000000000007,UL) /* All pgsz bits        */

#define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
#define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V

#if REAL_HPAGE_SHIFT != 22
#error REAL_HPAGE_SHIFT and _PAGE_SZHUGE_foo must match up
#endif

#define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
#define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V
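
/* Note: REAL_HPAGE_SHIFT (22, i.e. 4MB) is the page size actually
 * programmed into the TLB, while HPAGE_SHIFT (== PMD_SHIFT, 8MB here)
 * is the huge page size seen by generic mm code.  Each 8MB huge page
 * is thus backed by two real 4MB TTEs, which is why _PAGE_SZHUGE_*
 * select the 4MB size encodings above.
 */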

/* We borrow bit 20 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	_AC(0x0000000000100000, UL)

#ifndef __ASSEMBLY__

pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);

unsigned long pte_sz_bits(unsigned long size);

extern pgprot_t PAGE_KERNEL;
extern pgprot_t PAGE_KERNEL_LOCKED;
extern pgprot_t PAGE_COPY;
extern pgprot_t PAGE_SHARED;

/* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
extern unsigned long _PAGE_IE;
extern unsigned long _PAGE_E;
extern unsigned long _PAGE_CACHE;

extern unsigned long pg_iobits;
extern unsigned long _PAGE_ALL_SZ_BITS;

extern struct page *mem_map_zero;
#define ZERO_PAGE(vaddr)	(mem_map_zero)

/* PFNs are real physical page numbers.  However, mem_map only begins to record
 * per-page information starting at pfn_base.  This is to handle systems where
 * the first physical page in the machine is at some huge physical address,
 * such as 4GB.  This is common on a partitioned E10000, for example.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long paddr = pfn << PAGE_SHIFT;

	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
	return __pte(paddr | pgprot_val(prot));
}
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
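
/* Illustrative usage sketch: build a PTE for a page with kernel
 * protections and install it (set_pte_at() comes from the generic
 * code once set_ptes() is defined later in this file):
 *
 *	pte_t pte = mk_pte(page, PAGE_KERNEL);
 *	set_pte_at(mm, addr, ptep, pte);
 *
 * pfn_pte() can simply OR the paddr and the protections together
 * because the 8K size encoding is zero in both pte formats, which the
 * BUILD_BUG_ON() above asserts.
 */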

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte = pfn_pte(page_nr, pgprot);

	return __pmd(pte_val(pte));
}
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
#endif

/* This one can be done with two shifts.  */
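/* A note on the 661: patterns used throughout this file: the
 * instructions at the 661 label are the sun4u versions, and the
 * .sun4v_2insn_patch section records a replacement pair that the
 * early boot code copies over them when running under the sun4v
 * hypervisor (.sun_m7_2insn_patch likewise for M7).  Here, shifting
 * left by 21 then right by 21 + PAGE_SHIFT isolates pa[42:13] on
 * sun4u, while the patched shifts of 8 isolate pa[55:13] on sun4v.
 */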
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long ret;

	__asm__ __volatile__(
	"\n661:	sllx		%1, %2, %0\n"
	"	srlx		%0, %3, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sllx		%1, %4, %0\n"
	"	srlx		%0, %5, %0\n"
	"	.previous\n"
	: "=r" (ret)
	: "r" (pte_val(pte)),
	  "i" (21), "i" (21 + PAGE_SHIFT),
	  "i" (8), "i" (8 + PAGE_SHIFT));

	return ret;
}
#define pte_page(x) pfn_to_page(pte_pfn(x))

static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
	unsigned long mask, tmp;

	/* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
	 * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
	 *
	 * Even if we use negation tricks the result is still a 6
	 * instruction sequence, so don't try to play fancy and just
	 * do the most straightforward implementation.
	 *
	 * Note: We encode this into 3 sun4v 2-insn patch sequences.
	 */

	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
	__asm__ __volatile__(
	"\n661:	sethi		%%uhi(%2), %1\n"
	"	sethi		%%hi(%2), %0\n"
	"\n662:	or		%1, %%ulo(%2), %1\n"
	"	or		%0, %%lo(%2), %0\n"
	"\n663:	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%3), %1\n"
	"	sethi		%%hi(%3), %0\n"
	"	.word		662b\n"
	"	or		%1, %%ulo(%3), %1\n"
	"	or		%0, %%lo(%3), %0\n"
	"	.word		663b\n"
	"	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.previous\n"
	"	.section	.sun_m7_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sethi		%%hi(%4), %0\n"
	"	.word		662b\n"
	"	or		%1, %%ulo(%4), %1\n"
	"	or		%0, %%lo(%4), %0\n"
	"	.word		663b\n"
	"	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (mask), "=r" (tmp)
	: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
	       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V),
	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
	       _PAGE_CP_4V | _PAGE_E_4V |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));

	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_modify(pte, newprot);

	return __pmd(pte_val(pte));
}
#endif

static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	unsigned long val = pgprot_val(prot);

	__asm__ __volatile__(
	"\n661:	andn		%0, %2, %0\n"
	"	or		%0, %3, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	andn		%0, %4, %0\n"
	"	or		%0, %5, %0\n"
	"	.previous\n"
	"	.section	.sun_m7_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	andn		%0, %6, %0\n"
	"	or		%0, %5, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
	             "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V),
	             "i" (_PAGE_CP_4V));

	return __pgprot(val);
}
/* Various pieces of code check for platform support by ifdef testing
 * on "pgprot_noncached".  That's broken and should be fixed, but for
 * now...
 */
#define pgprot_noncached pgprot_noncached

static inline unsigned long pte_dirty(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_write(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

	return (pte_val(pte) & mask);
}

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
#define arch_make_huge_pte arch_make_huge_pte
static inline unsigned long __pte_default_huge_mask(void)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	sethi		%%uhi(%1), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	mov		%2, %0\n"
	"	nop\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));

	return mask;
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | __pte_default_huge_mask());
}

static inline bool is_default_hugetlb_pte(pte_t pte)
{
	unsigned long mask = __pte_default_huge_mask();

	return (pte_val(pte) & mask) == mask;
}

static inline bool is_hugetlb_pmd(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PMD_HUGE);
}

static inline bool is_hugetlb_pud(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PUD_HUGE);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkhuge(pte);
	pte_val(pte) |= _PAGE_PMD_HUGE;

	return __pmd(pte_val(pte));
}
#endif
#else
static inline bool is_hugetlb_pte(pte_t pte)
{
	return false;
}
#endif

static inline pte_t __pte_mkhwwrite(pte_t pte)
{
	unsigned long val = pte_val(pte);

	/*
	 * Note: we only want to set the HW writable bit if the SW writable bit
	 * and the SW dirty bit are set.
	 */
	__asm__ __volatile__(
	"\n661:	or		%0, %2, %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	or		%0, %3, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_W_4U), "i" (_PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	unsigned long val = pte_val(pte), mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));

	pte = __pte(val | mask);
	return pte_write(pte) ? __pte_mkhwwrite(pte) : pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	andn		%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sllx		%1, 32, %1\n"
	"	.word		662b\n"
	"	or		%1, %%lo(%4), %1\n"
	"	andn		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
	  "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	unsigned long val = pte_val(pte), mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

	pte = __pte(val | mask);
	return pte_dirty(pte) ? __pte_mkhwwrite(pte) : pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	andn		%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sllx		%1, 32, %1\n"
	"	.word		662b\n"
	"	or		%1, %%lo(%4), %1\n"
	"	andn		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
	  "i" (_PAGE_WRITE_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkold(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	mask |= _PAGE_R;

	return __pte(pte_val(pte) & ~mask);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	mask |= _PAGE_R;

	return __pte(pte_val(pte) | mask);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

static inline pte_t pte_mkmcd(pte_t pte)
{
	pte_val(pte) |= _PAGE_MCD_4V;
	return pte;
}

static inline pte_t pte_mknotmcd(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_MCD_4V;
	return pte;
}

static inline unsigned long pte_young(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_exec(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	sethi		%%hi(%1), %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	mov		%2, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_present(pte_t pte)
{
	unsigned long val = pte_val(pte);

	__asm__ __volatile__(
	"\n661:	and		%0, %2, %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	and		%0, %3, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));

	return val;
}

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	return pte_val(a) & _PAGE_VALID;
}

static inline unsigned long pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

#define pmd_leaf	pmd_large
static inline unsigned long pmd_large(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_pfn(pte);
}

#define pmd_write pmd_write
static inline unsigned long pmd_write(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_write(pte);
}

#define pud_write(pud)	pte_write(__pte(pud_val(pud)))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_dirty pmd_dirty
static inline unsigned long pmd_dirty(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_dirty(pte);
}

#define pmd_young pmd_young
static inline unsigned long pmd_young(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_young(pte);
}

static inline unsigned long pmd_trans_huge(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkold(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_wrprotect(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkdirty(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkclean(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkyoung(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkwrite_novma(pte);

	return __pmd(pte_val(pte));
}

static inline pgprot_t pmd_pgprot(pmd_t entry)
{
	unsigned long val = pmd_val(entry);

	return __pgprot(val);
}
#endif

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != 0UL;
}

#define pmd_none(pmd)			(!pmd_val(pmd))

/* pmd_bad() is only called on non-trans-huge PMDs.  Our encoding is
 * very simple, it's just the physical address.  PTE tables are of
 * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
 * the top bits outside of the range of any physical address size we
 * support are clear as well.  We also validate the physical address
 * itself.
 */
#define pmd_bad(pmd)			(pmd_val(pmd) & ~PAGE_MASK)

#define pud_none(pud)			(!pud_val(pud))

#define pud_bad(pud)			(pud_val(pud) & ~PAGE_MASK)

#define p4d_none(p4d)			(!p4d_val(p4d))

#define p4d_bad(p4d)			(p4d_val(p4d) & ~PAGE_MASK)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd);
#else
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}
#endif

static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	unsigned long val = __pa((unsigned long) (ptep));

	pmd_val(*pmdp) = val;
}

#define pud_set(pudp, pmdp)	\
	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));
	unsigned long pfn;

	pfn = pte_pfn(pte);

	return ((unsigned long) __va(pfn << PAGE_SHIFT));
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	pte_t pte = __pte(pud_val(pud));
	unsigned long pfn;

	pfn = pte_pfn(pte);

	return ((pmd_t *) __va(pfn << PAGE_SHIFT));
}

#define pmd_page(pmd) 			virt_to_page((void *)pmd_page_vaddr(pmd))
#define pud_page(pud)			virt_to_page((void *)pud_pgtable(pud))
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
#define pud_present(pud)		(pud_val(pud) != 0U)
#define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)
#define p4d_pgtable(p4d)		\
	((pud_t *) __va(p4d_val(p4d)))
#define p4d_present(p4d)		(p4d_val(p4d) != 0U)
#define p4d_clear(p4dp)			(p4d_val(*(p4dp)) = 0UL)

/* only used by the stubbed out hugetlb gup code, should never be called */
#define p4d_page(p4d)			NULL

#define pud_leaf	pud_large
static inline unsigned long pud_large(pud_t pud)
{
	pte_t pte = __pte(pud_val(pud));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	pte_t pte = __pte(pud_val(pud));

	return pte_pfn(pte);
}

/* Same in both SUN4V and SUN4U.  */
#define pte_none(pte) 			(!pte_val(pte))

#define p4d_set(p4dp, pudp)	\
	(p4d_val(*(p4dp)) = (__pa((unsigned long) (pudp))))

/* We cannot include <linux/mm_types.h> at this point yet: */
extern struct mm_struct init_mm;

/* Actual page table PTE updates.  */
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift);

static inline void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
				       pte_t *ptep, pte_t orig, int fullmm,
				       unsigned int hugepage_shift)
{
	/* It is more efficient to let flush_tlb_kernel_range()
	 * handle init_mm tlb flushes.
	 *
	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
	 *             and SUN4V pte layout, so this inline test is fine.
	 */
	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
		tlb_batch_add(mm, vaddr, ptep, orig, fullmm, hugepage_shift);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	set_pmd_at(mm, addr, pmdp, __pmd(0UL));
	return pmd;
}

static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, pte_t pte, int fullmm)
{
	pte_t orig = *ptep;

	*ptep = pte;
	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT);
}

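/* set_ptes() installs 'nr' consecutive PTEs for one folio.  Because
 * the physical address lives in the low bits of the TTE, bumping
 * pte_val() by PAGE_SIZE advances the mapping to the next physical
 * page on each iteration; generic mm code guarantees that a folio's
 * pages are physically contiguous.
 */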
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned int nr)
{
	arch_enter_lazy_mmu_mode();
	for (;;) {
		__set_pte_at(mm, addr, ptep, pte, 0);
		if (--nr == 0)
			break;
		ptep++;
		pte_val(pte) += PAGE_SIZE;
		addr += PAGE_SIZE;
	}
	arch_leave_lazy_mmu_mode();
}
#define set_ptes set_ptes

#define pte_clear(mm,addr,ptep)		\
	set_pte_at((mm), (addr), (ptep), __pte(0UL))

#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(mm,addr,ptep,fullmm)	\
	__set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm))

#ifdef DCACHE_ALIASING_POSSIBLE
#define __HAVE_ARCH_MOVE_PTE
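/* On these CPUs the D-cache is virtually indexed with one index bit
 * (bit 13) above the 8K page offset, so two mappings of the same page
 * that differ in bit 13 land in different cache lines.  The (1 << 13)
 * test below flushes the page when mremap() changes its cache color.
 */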
#define move_pte(pte, prot, old_addr, new_addr)				\
({									\
	pte_t newpte = (pte);						\
	if (tlb_type != hypervisor && pte_present(pte)) {		\
		unsigned long this_pfn = pte_pfn(pte);			\
									\
		if (pfn_valid(this_pfn) &&				\
		    (((old_addr) ^ (new_addr)) & (1 << 13)))		\
			flush_dcache_folio_all(current->mm,		\
				page_folio(pfn_to_page(this_pfn)));	\
	}								\
	newpte;								\
})
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

void paging_init(void);
unsigned long find_ecache_flush_span(unsigned long size);

struct seq_file;
void mmu_info(struct seq_file *);

struct vm_area_struct;
void update_mmu_cache_range(struct vm_fault *, struct vm_area_struct *,
		unsigned long addr, pte_t *ptep, unsigned int nr);
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 *   3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 *   <--------------------------- offset ---------------------------
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   --------------------> E <-- type ---> <------- zeroes -------->
 */
#define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0x7fUL)
#define __swp_offset(entry)	((entry).val >> (PAGE_SHIFT + 8UL))
#define __swp_entry(type, offset)	\
	( (swp_entry_t) \
	  { \
		((((long)(type) & 0x7fUL) << PAGE_SHIFT) | \
		 ((long)(offset) << (PAGE_SHIFT + 8UL))) \
	  } )
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
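
/* Worked example (PAGE_SHIFT == 13): the type field sits in bits
 * 19:13, the exclusive marker E in bit 20, and the offset from bit 21
 * upward, so e.g. __swp_entry(2, 0x10) yields
 *
 *	(2UL << 13) | (0x10UL << 21) == 0x2004000
 *
 * The low 13 bits stay zero, so _PAGE_PRESENT_4U (bit 7) and
 * _PAGE_PRESENT_4V (bit 4) are clear and pte_present() correctly
 * fails on swap PTEs.
 */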

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

int page_in_phys_avail(unsigned long paddr);

/*
 * For sparc32 & sparc64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffffffffffUL)
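
/* Example (BITS_PER_LONG == 64): MK_IOSPACE_PFN(2, 0x1000) places the
 * I/O space number in bits 63:60, giving 0x2000000000001000, which
 * GET_IOSPACE() and GET_PFN() then split back apart.
 */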

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pte_t pte);

int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
		  unsigned long addr, pte_t oldpte);

#define __HAVE_ARCH_DO_SWAP_PAGE
static inline void arch_do_swap_page(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long addr,
				     pte_t pte, pte_t oldpte)
{
	/* If this is a new page being mapped in, there can be no
	 * ADI tags stored away for this page.  Skip looking for
	 * stored tags.
	 */
	if (pte_none(oldpte))
		return;

	if (adi_state.enabled && (pte_val(pte) & _PAGE_MCD_4V))
		adi_restore_tags(mm, vma, addr, pte);
}

#define __HAVE_ARCH_UNMAP_ONE
static inline int arch_unmap_one(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 unsigned long addr, pte_t oldpte)
{
	if (adi_state.enabled && (pte_val(oldpte) & _PAGE_MCD_4V))
		return adi_save_tags(mm, vma, addr, oldpte);
	return 0;
}

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
	int space = GET_IOSPACE(pfn);
	unsigned long phys_base;

	phys_base = offset | (((unsigned long) space) << 32UL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range

static inline unsigned long __untagged_addr(unsigned long start)
{
	if (adi_capable()) {
		long addr = start;

		/* If userspace has passed a versioned address, the kernel
		 * will not find it in the VMAs since it does not store
		 * version tags in the list of VMAs.  Storing version
		 * tags in the list of VMAs is impractical since they can
		 * be changed any time from userspace without dropping
		 * into the kernel.  Any address search in the VMAs is
		 * done with non-versioned addresses.  Ensure the ADI
		 * version bits are dropped here by sign-extending the
		 * last bit before the ADI bits.  The IOMMU does not
		 * implement version tags.
		 */
		return (addr << (long)adi_nbits()) >> (long)adi_nbits();
	}

	return start;
}
#define untagged_addr(addr) \
	((__typeof__(addr))(__untagged_addr((unsigned long)(addr))))
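
/* Example (illustrative values; the actual tag width comes from
 * adi_nbits()): with four version bits, 0x5fff0000cafe0000 carrying
 * version 0x5 in bits 63:60 becomes 0xffff0000cafe0000 after the
 * sign-extending shift pair, i.e. the non-versioned form that the
 * VMA lookup expects.
 */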

static inline bool pte_access_permitted(pte_t pte, bool write)
{
	u64 prot;

	if (tlb_type == hypervisor) {
		prot = _PAGE_PRESENT_4V | _PAGE_P_4V;
		if (write)
			prot |= _PAGE_WRITE_4V;
	} else {
		prot = _PAGE_PRESENT_4U | _PAGE_P_4U;
		if (write)
			prot |= _PAGE_WRITE_4U;
	}

	return (pte_val(pte) & (prot | _PAGE_SPECIAL)) == prot;
}
#define pte_access_permitted pte_access_permitted

/* We provide our own get_unmapped_area to cope with VA holes and
 * SHM area cache aliasing for userland.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
 * the largest alignment possible such that larger PTEs can be used.
 */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
				   unsigned long, unsigned long,
				   unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA

void sun4v_register_fault_status(void);
void sun4v_ktsb_register(void);
void __init cheetah_ecache_flush_init(void);
void sun4v_patch_tlb_handlers(void);

extern unsigned long cmdline_memory_size;

asmlinkage void do_sparc64_fault(struct pt_regs *regs);

#define pmd_pgtable(PMD)	((pte_t *)pmd_page_vaddr(PMD))

#ifdef CONFIG_HUGETLB_PAGE

#define pud_leaf_size pud_leaf_size
extern unsigned long pud_leaf_size(pud_t pud);

#define pmd_leaf_size pmd_leaf_size
extern unsigned long pmd_leaf_size(pmd_t pmd);

#define pte_leaf_size pte_leaf_size
extern unsigned long pte_leaf_size(pte_t pte);

#endif /* CONFIG_HUGETLB_PAGE */

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_PGTABLE_H) */