/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2017 XiaojingZhu <zhuxiaoj@ict.ac.cn>
 */

#ifndef _ASM_RISCV_PAGE_H
#define _ASM_RISCV_PAGE_H

#include <linux/pfn.h>
#include <linux/const.h>

#include <vdso/page.h>

#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
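
/*
 * Illustrative sizing, not an additional definition: with 4 KiB base pages
 * on rv64, PMD_SHIFT is 21, so HPAGE_SIZE works out to 2 MiB and
 * HUGETLB_PAGE_ORDER to 9.
 */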

/*
 * PAGE_OFFSET -- the first address of the first page of memory.
 * When not using MMU this corresponds to the first free page in
 * physical memory (aligned on a page boundary).
 */
#ifdef CONFIG_64BIT
#ifdef CONFIG_MMU
#define PAGE_OFFSET		kernel_map.page_offset
#else
#define PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
#endif
/*
 * By default, the CONFIG_PAGE_OFFSET value corresponds to the SV57 address
 * space, so also define the PAGE_OFFSET values for SV48 and SV39.
 */
#define PAGE_OFFSET_L4		_AC(0xffffaf8000000000, UL)
#define PAGE_OFFSET_L3		_AC(0xffffffd600000000, UL)
#else
#define PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
#endif /* CONFIG_64BIT */
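
/*
 * Note (descriptive, matches the definitions above): on 64-bit MMU kernels
 * PAGE_OFFSET is not a compile-time constant; early boot code is expected
 * to set kernel_map.page_offset to the SV57 default or to PAGE_OFFSET_L4/
 * PAGE_OFFSET_L3 once the supported paging mode is known.
 */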

#ifndef __ASSEMBLY__

#ifdef CONFIG_RISCV_ISA_ZICBOZ
void clear_page(void *page);
#else
#define clear_page(pgaddr)	memset((pgaddr), 0, PAGE_SIZE)
#endif
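
/*
 * Note (descriptive): when CONFIG_RISCV_ISA_ZICBOZ is enabled, clear_page()
 * is an out-of-line routine that can zero the page with the Zicboz
 * cbo.zero instruction instead of a plain memset().
 */
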
#define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)

#define clear_user_page(pgaddr, vaddr, page)	clear_page(pgaddr)
#define copy_user_page(vto, vfrom, vaddr, topg) \
			memcpy((vto), (vfrom), PAGE_SIZE)

/*
 * Use struct definitions to apply C type checking
 */

/* Page Global Directory entry */
typedef struct {
	unsigned long pgd;
} pgd_t;

/* Page Table entry */
typedef struct {
	unsigned long pte;
} pte_t;

typedef struct {
	unsigned long pgprot;
} pgprot_t;

typedef struct page *pgtable_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) })
#define __pgd(x)	((pgd_t) { (x) })
#define __pgprot(x)	((pgprot_t) { (x) })
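
/*
 * Illustrative use of the typed wrappers above (assumes _PAGE_DIRTY from
 * <asm/pgtable-bits.h>; not an additional definition):
 *
 *	pte_t pte = __pte(pte_val(old_pte) | _PAGE_DIRTY);
 *
 * Passing a bare unsigned long where a pte_t is expected fails to compile,
 * which is the point of wrapping the values in structs.
 */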

#ifdef CONFIG_64BIT
#define PTE_FMT "%016lx"
#else
#define PTE_FMT "%08lx"
#endif

#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
/*
 * We override this value as its generic definition uses __pa too early in
 * the boot process (before kernel_map.va_pa_offset is set).
 */
#define MIN_MEMBLOCK_ADDR	0
#endif

#ifdef CONFIG_MMU
#define ARCH_PFN_OFFSET		(PFN_DOWN((unsigned long)phys_ram_base))
#else
#define ARCH_PFN_OFFSET		(PAGE_OFFSET >> PAGE_SHIFT)
#endif /* CONFIG_MMU */
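
/*
 * Note (descriptive): ARCH_PFN_OFFSET makes pfn 0 of the flatmem mem_map
 * correspond to the first page of RAM rather than physical address 0, so
 * no struct pages are wasted on the address range below phys_ram_base.
 */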

struct kernel_mapping {
	unsigned long page_offset;
	unsigned long virt_addr;
	unsigned long virt_offset;
	uintptr_t phys_addr;
	uintptr_t size;
	/* Offset between linear mapping virtual address and kernel load address */
	unsigned long va_pa_offset;
	/* Offset between kernel mapping virtual address and kernel load address */
#ifdef CONFIG_XIP_KERNEL
	unsigned long va_kernel_xip_text_pa_offset;
	unsigned long va_kernel_xip_data_pa_offset;
	uintptr_t xiprom;
	uintptr_t xiprom_sz;
#else
	unsigned long va_kernel_pa_offset;
#endif
};

extern struct kernel_mapping kernel_map;
extern phys_addr_t phys_ram_base;

#define is_kernel_mapping(x) \
	((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))

#define is_linear_mapping(x) \
	((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE))

#ifndef CONFIG_DEBUG_VIRTUAL
#define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
#else
void *linear_mapping_pa_to_va(unsigned long x);
#endif

#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_pa_to_va(y)	({					\
	unsigned long _y = (unsigned long)(y);					\
	(_y < phys_ram_base) ?							\
		(void *)(_y + kernel_map.va_kernel_xip_text_pa_offset) :	\
		(void *)(_y + kernel_map.va_kernel_xip_data_pa_offset);	\
	})
#else
#define kernel_mapping_pa_to_va(y)	((void *)((unsigned long)(y) + kernel_map.va_kernel_pa_offset))
#endif
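
/*
 * Summary of the conversions above (illustrative, derived from the macros):
 *
 *	linear map:  va = pa + kernel_map.va_pa_offset
 *	kernel map:  va = pa + kernel_map.va_kernel_pa_offset
 *	             (XIP kernels use separate text/data offsets instead)
 */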

#define __pa_to_va_nodebug(x)		linear_mapping_pa_to_va(x)

#ifndef CONFIG_DEBUG_VIRTUAL
#define linear_mapping_va_to_pa(x)	((unsigned long)(x) - kernel_map.va_pa_offset)
#else
phys_addr_t linear_mapping_va_to_pa(unsigned long x);
#endif

#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_va_to_pa(y)	({					\
	unsigned long _y = (unsigned long)(y);					\
	(_y < kernel_map.virt_addr + kernel_map.xiprom_sz) ?			\
		(_y - kernel_map.va_kernel_xip_text_pa_offset) :		\
		(_y - kernel_map.va_kernel_xip_data_pa_offset);			\
	})
#else
#define kernel_mapping_va_to_pa(y)	((unsigned long)(y) - kernel_map.va_kernel_pa_offset)
#endif

#define __va_to_pa_nodebug(x)	({						\
	unsigned long _x = x;							\
	is_linear_mapping(_x) ?							\
		linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x);	\
	})

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__va_to_pa_nodebug(x)
#define __phys_addr_symbol(x)	__va_to_pa_nodebug(x)
#endif /* CONFIG_DEBUG_VIRTUAL */

#define __pa_symbol(x)	__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa(x)		__virt_to_phys((unsigned long)(x))
#define __va(x)		((void *)__pa_to_va_nodebug((phys_addr_t)(x)))
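
/*
 * Illustrative usage (not additional definitions): __pa() and __va() are
 * inverses for linear-map addresses, e.g.
 *
 *	void *va = __va(pa);
 *	phys_addr_t pa2 = __pa(va);	// pa2 == pa for a linear-map address
 *
 * __pa_symbol() is intended for kernel-image symbols; with
 * CONFIG_DEBUG_VIRTUAL the out-of-line helpers add sanity checking.
 */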

#define phys_to_pfn(phys)	(PFN_DOWN(phys))
#define pfn_to_phys(pfn)	(PFN_PHYS(pfn))

#define virt_to_pfn(vaddr)	(phys_to_pfn(__pa(vaddr)))
#define pfn_to_virt(pfn)	(__va(pfn_to_phys(pfn)))

#define virt_to_page(vaddr)	(pfn_to_page(virt_to_pfn(vaddr)))
#define page_to_virt(page)	(pfn_to_virt(page_to_pfn(page)))

#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))

unsigned long kaslr_offset(void);

static __always_inline void *pfn_to_kaddr(unsigned long pfn)
{
	return __va(pfn << PAGE_SHIFT);
}

#endif /* __ASSEMBLY__ */

#define virt_addr_valid(vaddr)	({						\
	unsigned long _addr = (unsigned long)vaddr;				\
	(unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
})
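
/*
 * Note (descriptive): virt_addr_valid() is meant for linear-map kernel
 * virtual addresses; it checks that the address is at or above PAGE_OFFSET
 * and that the resulting pfn is backed by a valid struct page.
 */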

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _ASM_RISCV_PAGE_H */