xref: /linux/arch/riscv/include/asm/page.h (revision 52a5a22d8afe3bd195f7b470c7535c63717f5ff7)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2017 XiaojingZhu <zhuxiaoj@ict.ac.cn>
 */

#ifndef _ASM_RISCV_PAGE_H
#define _ASM_RISCV_PAGE_H

#include <linux/pfn.h>
#include <linux/const.h>

#include <vdso/page.h>

#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK              (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER      (HPAGE_SHIFT - PAGE_SHIFT)
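/*
 * Worked example (illustrative, assuming 4 KiB base pages on RV64, i.e.
 * PAGE_SHIFT == 12 and PMD_SHIFT == 21): HPAGE_SIZE is 1UL << 21 == 2 MiB
 * and HUGETLB_PAGE_ORDER is 21 - 12 == 9, so one huge page spans 512 base
 * pages.  The values track PMD_SHIFT and change with the page table layout.
 */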

/*
 * PAGE_OFFSET -- the first address of the first page of memory.
 * When not using an MMU, this corresponds to the first free page in
 * physical memory (aligned on a page boundary).
 */
#ifdef CONFIG_64BIT
#ifdef CONFIG_MMU
#define PAGE_OFFSET		kernel_map.page_offset
#else
#define PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
#endif
/*
 * By default, CONFIG_PAGE_OFFSET corresponds to the SV57 address space, so
 * also define the PAGE_OFFSET values to use for SV48 and SV39.
 */
#define PAGE_OFFSET_L4		_AC(0xffffaf8000000000, UL)
#define PAGE_OFFSET_L3		_AC(0xffffffd600000000, UL)
#else
#define PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
#endif /* CONFIG_64BIT */
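/*
 * With CONFIG_MMU on 64-bit, PAGE_OFFSET reads kernel_map.page_offset at
 * run time; broadly, early boot code lowers it to PAGE_OFFSET_L4 or
 * PAGE_OFFSET_L3 when the hardware supports only SV48 or SV39 rather than
 * the default SV57 layout.
 */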

#ifndef __ASSEMBLY__

#ifdef CONFIG_RISCV_ISA_ZICBOZ
void clear_page(void *page);
#else
#define clear_page(pgaddr)			memset((pgaddr), 0, PAGE_SIZE)
#endif
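/*
 * With CONFIG_RISCV_ISA_ZICBOZ the out-of-line clear_page() can zero the
 * page with the Zicboz cache-block zero instruction (cbo.zero) when the
 * extension is present at run time, falling back to a plain memset()
 * otherwise.
 */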
#define copy_page(to, from)			memcpy((to), (from), PAGE_SIZE)

#define clear_user_page(pgaddr, vaddr, page)	clear_page(pgaddr)
#define copy_user_page(vto, vfrom, vaddr, topg) \
			memcpy((vto), (vfrom), PAGE_SIZE)

/*
 * Use struct definitions to apply C type checking
 */

/* Page Global Directory entry */
typedef struct {
	unsigned long pgd;
} pgd_t;

/* Page Table entry */
typedef struct {
	unsigned long pte;
} pte_t;

typedef struct {
	unsigned long pgprot;
} pgprot_t;

typedef struct page *pgtable_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) })
#define __pgd(x)	((pgd_t) { (x) })
#define __pgprot(x)	((pgprot_t) { (x) })
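/*
 * Illustration (not kernel code): because the types above are single-member
 * structs, code such as
 *	pte_t pte = __pte(raw_val);
 *	unsigned long raw = pte_val(pte);
 * type-checks, while assigning a bare unsigned long to a pte_t, or mixing
 * pte_t and pgd_t values, is rejected by the compiler.  "raw_val" is just a
 * placeholder name here.
 */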

#ifdef CONFIG_64BIT
#define PTE_FMT "%016lx"
#else
#define PTE_FMT "%08lx"
#endif

#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
/*
 * We override this value as its generic definition uses __pa too early in
 * the boot process (before kernel_map.va_pa_offset is set).
 */
#define MIN_MEMBLOCK_ADDR      0
#endif

#ifdef CONFIG_MMU
#define ARCH_PFN_OFFSET		(PFN_DOWN((unsigned long)phys_ram_base))
#else
#define ARCH_PFN_OFFSET		(PAGE_OFFSET >> PAGE_SHIFT)
#endif /* CONFIG_MMU */
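/*
 * Example values (illustrative only): with 4 KiB pages and RAM starting at
 * physical address 0x80000000, ARCH_PFN_OFFSET is PFN_DOWN(0x80000000) ==
 * 0x80000, i.e. the pfn of the first page frame that gets a struct page.
 */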

struct kernel_mapping {
	unsigned long page_offset;
	unsigned long virt_addr;
	unsigned long virt_offset;
	uintptr_t phys_addr;
	uintptr_t size;
	/* Offset between linear mapping virtual addresses and physical addresses */
	unsigned long va_pa_offset;
	/* Offset between kernel mapping virtual address and kernel load address */
#ifdef CONFIG_XIP_KERNEL
	unsigned long va_kernel_xip_text_pa_offset;
	unsigned long va_kernel_xip_data_pa_offset;
	uintptr_t xiprom;
	uintptr_t xiprom_sz;
#else
	unsigned long va_kernel_pa_offset;
#endif
};
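/*
 * Sketch of how the two offsets are used (example relationships, not exact
 * boot code): roughly,
 *	va_pa_offset        == PAGE_OFFSET - phys_ram_base
 *	va_kernel_pa_offset == virt_addr - phys_addr
 * so a physical address inside the kernel image ends up with two virtual
 * aliases, one in the linear map and one in the kernel mapping.
 */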

extern struct kernel_mapping kernel_map;
extern phys_addr_t phys_ram_base;
extern unsigned long vmemmap_start_pfn;

#define is_kernel_mapping(x)	\
	((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))

#define is_linear_mapping(x)	\
	((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE))

#ifndef CONFIG_DEBUG_VIRTUAL
#define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
#else
void *linear_mapping_pa_to_va(unsigned long x);
#endif

#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_pa_to_va(y)	({					\
	unsigned long _y = (unsigned long)(y);					\
	(_y < phys_ram_base) ?							\
		(void *)(_y + kernel_map.va_kernel_xip_text_pa_offset) :	\
		(void *)(_y + kernel_map.va_kernel_xip_data_pa_offset);		\
	})
#else
#define kernel_mapping_pa_to_va(y) ((void *)((unsigned long)(y) + kernel_map.va_kernel_pa_offset))
#endif

#define __pa_to_va_nodebug(x)		linear_mapping_pa_to_va(x)

#ifndef CONFIG_DEBUG_VIRTUAL
#define linear_mapping_va_to_pa(x)	((unsigned long)(x) - kernel_map.va_pa_offset)
#else
phys_addr_t linear_mapping_va_to_pa(unsigned long x);
#endif

#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_va_to_pa(y) ({						\
	unsigned long _y = (unsigned long)(y);					\
	(_y < kernel_map.virt_addr + kernel_map.xiprom_sz) ?			\
		(_y - kernel_map.va_kernel_xip_text_pa_offset) :		\
		(_y - kernel_map.va_kernel_xip_data_pa_offset);			\
	})
#else
#define kernel_mapping_va_to_pa(y) ((unsigned long)(y) - kernel_map.va_kernel_pa_offset)
#endif
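/*
 * For XIP kernels the image is split between text executing in place from
 * flash (xiprom) and data copied into RAM, hence the two offsets: the
 * comparisons in the XIP variants above select the text or the data offset
 * depending on which half of the image the address belongs to.
 */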

#define __va_to_pa_nodebug(x)	({						\
	unsigned long _x = x;							\
	is_linear_mapping(_x) ?							\
		linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x);	\
	})

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__va_to_pa_nodebug(x)
#define __phys_addr_symbol(x)	__va_to_pa_nodebug(x)
#endif /* CONFIG_DEBUG_VIRTUAL */

#define __pa_symbol(x)	__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa(x)		__virt_to_phys((unsigned long)(x))
#define __va(x)		((void *)__pa_to_va_nodebug((phys_addr_t)(x)))

#define phys_to_pfn(phys)	(PFN_DOWN(phys))
#define pfn_to_phys(pfn)	(PFN_PHYS(pfn))

#define virt_to_pfn(vaddr)	(phys_to_pfn(__pa(vaddr)))
#define pfn_to_virt(pfn)	(__va(pfn_to_phys(pfn)))

#define virt_to_page(vaddr)	(pfn_to_page(virt_to_pfn(vaddr)))
#define page_to_virt(page)	(pfn_to_virt(page_to_pfn(page)))

#define sym_to_pfn(x)           __phys_to_pfn(__pa_symbol(x))
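/*
 * Putting the helpers together (illustrative): virt_to_page(vaddr) expands
 * to pfn_to_page(PFN_DOWN(__pa(vaddr))), so it is only meaningful for
 * addresses that __pa() can translate, i.e. linear-map or kernel-image
 * addresses, not vmalloc addresses.
 */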

unsigned long kaslr_offset(void);

static __always_inline void *pfn_to_kaddr(unsigned long pfn)
{
	return __va(pfn << PAGE_SHIFT);
}

#endif /* __ASSEMBLY__ */

#define virt_addr_valid(vaddr)	({						\
	unsigned long _addr = (unsigned long)vaddr;				\
	(unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr));	\
})

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _ASM_RISCV_PAGE_H */