xref: /linux/arch/mips/include/asm/page.h (revision 71dfa617ea9f18e4585fe78364217cd32b1fc382)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 03 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#include <spaces.h>
#include <linux/const.h>
#include <linux/kernel.h>
#include <asm/mipsregs.h>

/*
 * PAGE_SHIFT determines the page size
 */
#define PAGE_SHIFT	CONFIG_PAGE_SHIFT
#define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))
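
/*
 * Editor's illustrative sketch (not part of the original header): with a
 * 4 KiB base page size (PAGE_SHIFT == 12), PAGE_SIZE expands to 0x1000 and
 * PAGE_MASK to 0xfffff000 (sign-extended on 64-bit), so masking an address
 * splits it into its page base and its offset within the page.  The
 * example_* function name below is made up for illustration.
 */
#if 0	/* editor's example -- never compiled */
static inline void example_page_masking(unsigned long addr)
{
	unsigned long base   = addr & PAGE_MASK;	/* page-aligned base */
	unsigned long offset = addr & ~PAGE_MASK;	/* offset within the page */

	/* e.g. addr == 0x80001234 -> base == 0x80001000, offset == 0x234 */
}
#endif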

/*
 * This is used for calculating the real page sizes
 * for FTLB or VTLB + FTLB configurations.
 */
static inline unsigned int page_size_ftlb(unsigned int mmuextdef)
{
	switch (mmuextdef) {
	case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
		if (PAGE_SIZE == (1 << 30))
			return 5;
		if (PAGE_SIZE == (1llu << 32))
			return 6;
		if (PAGE_SIZE > (256 << 10))
			return 7; /* reserved */
		fallthrough;
	case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
		return (PAGE_SHIFT - 10) / 2;
	default:
		panic("Invalid FTLB configuration with Conf4_mmuextdef=%d value\n",
		      mmuextdef >> 14);
	}
}
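
/*
 * Editor's illustrative sketch (not part of the original header): for the
 * usual base page sizes both Config4 MMUExtDef layouts yield
 * (PAGE_SHIFT - 10) / 2, i.e. 1 for 4 KiB, 2 for 16 KiB and 3 for 64 KiB
 * pages, while the FTLBSIZEEXT layout adds the encodings 5 and 6 for 1 GiB
 * and 4 GiB page sizes.  The example_* function name is made up.
 */
#if 0	/* editor's example -- never compiled */
static inline void example_ftlb_page_size(void)
{
	/* With PAGE_SHIFT == 12 this returns (12 - 10) / 2 == 1. */
	unsigned int enc = page_size_ftlb(MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT);
}
#endif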

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define HPAGE_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE	(_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#else /* !CONFIG_MIPS_HUGE_TLB_SUPPORT */
#define HPAGE_SHIFT	({BUILD_BUG(); 0; })
#define HPAGE_SIZE	({BUILD_BUG(); 0; })
#define HPAGE_MASK	({BUILD_BUG(); 0; })
#define HUGETLB_PAGE_ORDER	({BUILD_BUG(); 0; })
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
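
/*
 * Editor's illustrative sketch (not part of the original header):
 * HPAGE_SHIFT == 2 * PAGE_SHIFT - 3 is the span covered by one PMD entry
 * when a page-table page holds PAGE_SIZE / 8 eight-byte PTEs (an assumption
 * that matches the 64-bit pte layouts).  The example_* function name is
 * made up.
 */
#if 0	/* editor's example -- never compiled */
static inline void example_hpage_sizes(void)
{
	/*
	 * PAGE_SHIFT == 12 -> HPAGE_SHIFT == 21 ->   2 MiB huge pages
	 * PAGE_SHIFT == 14 -> HPAGE_SHIFT == 25 ->  32 MiB huge pages
	 * PAGE_SHIFT == 16 -> HPAGE_SHIFT == 29 -> 512 MiB huge pages
	 */
	unsigned long bytes = HPAGE_SIZE;
	unsigned int order  = HUGETLB_PAGE_ORDER;	/* == HPAGE_SHIFT - PAGE_SHIFT */
}
#endif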

#include <linux/pfn.h>

extern void build_clear_page(void);
extern void build_copy_page(void);

/*
 * ARCH_PFN_OFFSET is normally needed only for the FLATMEM memory model,
 * but our early mem init code uses it for all memory models, so always
 * define it.
 */
#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
extern unsigned long ARCH_PFN_OFFSET;
# define ARCH_PFN_OFFSET	ARCH_PFN_OFFSET
#else
# define ARCH_PFN_OFFSET	PFN_UP(PHYS_OFFSET)
#endif
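
/*
 * Editor's illustrative sketch (not part of the original header): under
 * FLATMEM, pfn_to_page() indexes mem_map relative to ARCH_PFN_OFFSET, the
 * pfn of the first byte of RAM.  The start address (0x20000000) and the
 * example_* function name below are made-up values for illustration only.
 */
#if 0	/* editor's example -- never compiled */
static inline void example_arch_pfn_offset(void)
{
	/*
	 * With RAM starting at physical 0x20000000 and 4 KiB pages,
	 * PFN_UP(PHYS_OFFSET) == 0x20000, so pfn 0x20000 corresponds to
	 * mem_map[0] under FLATMEM.
	 */
	unsigned long first_pfn = ARCH_PFN_OFFSET;
}
#endif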

extern void clear_page(void * page);
extern void copy_page(void * to, void * from);

extern unsigned long shm_align_mask;

static inline unsigned long pages_do_alias(unsigned long addr1,
	unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
}
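
/*
 * Editor's illustrative sketch (not part of the original header): a nonzero
 * result means the two virtual addresses select different cache colours, so
 * a shared physical page mapped at both needs flushing.  The mask value
 * 0x3fff below is a made-up example; the real shm_align_mask is derived
 * from the probed cache geometry.
 */
#if 0	/* editor's example -- never compiled */
static inline void example_cache_alias(void)
{
	/*
	 * Suppose shm_align_mask == 0x3fff (16 KiB alias span):
	 *	pages_do_alias(0x1000, 0x3000) == 0x2000  -> aliasing possible
	 *	pages_do_alias(0x1000, 0x5000) == 0x0000  -> same colour, safe
	 */
	int aliases = pages_do_alias(0x1000, 0x3000) != 0;
}
#endif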

struct page;

static inline void clear_user_page(void *addr, unsigned long vaddr,
	struct page *page)
{
	extern void (*flush_data_cache_page)(unsigned long addr);

	clear_page(addr);
	if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)addr);
}

struct vm_area_struct;
extern void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma);

#define __HAVE_ARCH_COPY_USER_HIGHPAGE

/*
 * These are used to make use of C type-checking..
 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
  #ifdef CONFIG_CPU_MIPS32
    typedef struct { unsigned long pte_low, pte_high; } pte_t;
    #define pte_val(x)	  ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
    #define __pte(x)	  ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
  #else
    typedef struct { unsigned long long pte; } pte_t;
    #define pte_val(x)	((x).pte)
    #define __pte(x)	((pte_t) { (x) } )
  #endif
#else
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) } )
#endif
typedef struct page *pgtable_t;
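
/*
 * Editor's illustrative sketch (not part of the original header): on a
 * 32-bit CPU with 64-bit physical addresses the pte is split across
 * pte_low/pte_high, and pte_val()/__pte() reassemble it.  The example_*
 * function name and the pte value are made up for illustration.
 */
#if 0	/* editor's example -- never compiled */
static inline void example_pte_round_trip(void)
{
	/*
	 * __pte() splits 0x0000000180001234 into pte_low == 0x80001234 and
	 * pte_high == 0x1; pte_val() yields the original 64-bit value.
	 */
	pte_t pte = __pte(0x0000000180001234ULL);
	unsigned long long raw = pte_val(pte);
}
#endif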

/*
 * Right now we don't support 4-level pagetables, so all pud-related
 * definitions come from <asm-generic/pgtable-nopud.h>.
 */

/*
 * Finally, the top of the hierarchy: the pgd
 */
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)	((x).pgd)
#define __pgd(x)	((pgd_t) { (x) } )

/*
 * Manipulate page protection bits
 */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )
#define pte_pgprot(x)	__pgprot(pte_val(x) & ~_PFN_MASK)

/*
 * On R4000-style MMUs where a TLB entry maps an adjacent even / odd
 * pair of pages we only have a single global bit per pair of pages.  When
 * writing to the TLB make sure we always have the bit set for both pages
 * or none.  This macro is used to access the `buddy' of the pte we're
 * currently working on.
 */
#define ptep_buddy(x)	((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
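
/*
 * Editor's illustrative sketch (not part of the original header): XORing
 * the pointer with sizeof(pte_t) flips between the even and odd entry of a
 * pair, so applying ptep_buddy() twice returns the original pointer.  The
 * example_* function name is made up.
 */
#if 0	/* editor's example -- never compiled */
static inline void example_ptep_buddy(pte_t *ptep)
{
	/* With 8-byte ptes, a ptep at ...0x40 has its buddy at ...0x48. */
	pte_t *buddy = ptep_buddy(ptep);
}
#endif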

/*
 * __pa()/__va() should be used only during mem init.
 */
static inline unsigned long ___pa(unsigned long x)
{
	if (IS_ENABLED(CONFIG_64BIT)) {
		/*
		 * For MIPS64 the virtual address may either be in one of
		 * the compatibility segments ckseg0 or ckseg1, or it may
		 * be in xkphys.
		 */
		return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
	}

	if (!IS_ENABLED(CONFIG_EVA)) {
		/*
		 * We're using the standard MIPS32 legacy memory map, i.e.
		 * the address x is going to be in kseg0 or kseg1. We can
		 * handle either case by masking out the desired bits using
		 * CPHYSADDR.
		 */
		return CPHYSADDR(x);
	}

	/*
	 * EVA is in use so the memory map could be anything, making it not
	 * safe to just mask out bits.
	 */
	return x - PAGE_OFFSET + PHYS_OFFSET;
}
#define __pa(x)		___pa((unsigned long)(x))
#define __va(x)		((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
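
/*
 * Editor's illustrative sketch (not part of the original header): on the
 * classic non-EVA 32-bit layout with PAGE_OFFSET == 0x80000000 and
 * PHYS_OFFSET == 0, a KSEG0 address simply has its top bits masked off.
 * The addresses and the example_* name are made up for illustration.
 */
#if 0	/* editor's example -- never compiled */
static inline void example_pa_va(void)
{
	phys_addr_t pa = __pa((void *)0x80001000UL);	/* == 0x1000 */
	void *va = __va(0x1000);			/* == (void *)0x80001000 */
}
#endif
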
#include <asm/io.h>

/*
 * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
 * (lmo) resp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org).  The
 * discussion can be found in
 * https://lore.kernel.org/lkml/a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com
 *
 * It is unclear if the miscompilations mentioned in
 * https://lore.kernel.org/lkml/1281303490-390-1-git-send-email-namhyung@gmail.com
 * also affect MIPS, so we keep this workaround until GCC 3.x has been retired,
 * at which point we can apply https://patchwork.linux-mips.org/patch/1541/
 */
#define __pa_symbol_nodebug(x)	__pa(RELOC_HIDE((unsigned long)(x), 0))

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
#endif

#ifndef __pa_symbol
#define __pa_symbol(x)		__phys_addr_symbol((unsigned long)(x))
#endif

#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

#define virt_to_pfn(kaddr)	PFN_DOWN(virt_to_phys((void *)(kaddr)))
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
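
/*
 * Editor's illustrative sketch (not part of the original header): round
 * trip between a pfn and its kernel virtual address, assuming 4 KiB pages,
 * PAGE_OFFSET == 0x80000000 and PHYS_OFFSET == 0.  The example_* name and
 * the pfn value are made up for illustration.
 */
#if 0	/* editor's example -- never compiled */
static inline void example_pfn_round_trip(void)
{
	void *kaddr = pfn_to_kaddr(0x1000);	/* phys 0x1000000 -> 0x81000000 */
	unsigned long pfn = virt_to_pfn(kaddr);	/* back to 0x1000 */
	struct page *page = virt_to_page(kaddr);
}
#endif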

extern bool __virt_addr_valid(const volatile void *kaddr);
#define virt_addr_valid(kaddr)						\
	__virt_addr_valid((const volatile void *) (kaddr))

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC

extern unsigned long __kaslr_offset;
static inline unsigned long kaslr_offset(void)
{
	return __kaslr_offset;
}

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _ASM_PAGE_H */