/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2017 XiaojingZhu <zhuxiaoj@ict.ac.cn>
 */

#ifndef _ASM_RISCV_PAGE_H
#define _ASM_RISCV_PAGE_H

#include <linux/pfn.h>
#include <linux/const.h>

#define PAGE_SHIFT		CONFIG_PAGE_SHIFT
#define PAGE_SIZE		(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE - 1))

#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

/*
 * PAGE_OFFSET -- the first address of the first page of memory.
 * When not using MMU this corresponds to the first free page in
 * physical memory (aligned on a page boundary).
 */
#ifdef CONFIG_64BIT
#ifdef CONFIG_MMU
#define PAGE_OFFSET		kernel_map.page_offset
#else
#define PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
#endif
/*
 * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
 * define the PAGE_OFFSET value for SV48 and SV39.
 */
#define PAGE_OFFSET_L4		_AC(0xffffaf8000000000, UL)
#define PAGE_OFFSET_L3		_AC(0xffffffd600000000, UL)
#else
#define PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
#endif /* CONFIG_64BIT */

#ifndef __ASSEMBLY__

#ifdef CONFIG_RISCV_ISA_ZICBOZ
void clear_page(void *page);
#else
#define clear_page(pgaddr)		memset((pgaddr), 0, PAGE_SIZE)
#endif
#define copy_page(to, from)		memcpy((to), (from), PAGE_SIZE)

#define clear_user_page(pgaddr, vaddr, page)	clear_page(pgaddr)
#define copy_user_page(vto, vfrom, vaddr, topg) \
			memcpy((vto), (vfrom), PAGE_SIZE)

/*
 * Use struct definitions to apply C type checking
 */

/* Page Global Directory entry */
typedef struct {
	unsigned long pgd;
} pgd_t;

/* Page Table entry */
typedef struct {
	unsigned long pte;
} pte_t;

typedef struct {
	unsigned long pgprot;
} pgprot_t;

typedef struct page *pgtable_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) })
#define __pgd(x)	((pgd_t) { (x) })
#define __pgprot(x)	((pgprot_t) { (x) })

#ifdef CONFIG_64BIT
#define PTE_FMT "%016lx"
#else
#define PTE_FMT "%08lx"
#endif
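/*
 * Illustrative only (not part of the upstream header): wrapping each
 * entry value in a single-member struct makes pte_t, pgd_t and pgprot_t
 * distinct C types, so accidentally mixing them fails to compile:
 *
 *	pte_t pte = __pte(0);
 *	unsigned long raw = pte_val(pte);	// explicit unwrap is fine
 *	pgd_t pgd = pte;			// compile error: incompatible types
 */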
#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
/*
 * We override this value as its generic definition uses __pa too early in
 * the boot process (before kernel_map.va_pa_offset is set).
 */
#define MIN_MEMBLOCK_ADDR	0
#endif

#ifdef CONFIG_MMU
#define ARCH_PFN_OFFSET		(PFN_DOWN((unsigned long)phys_ram_base))
#else
#define ARCH_PFN_OFFSET		(PAGE_OFFSET >> PAGE_SHIFT)
#endif /* CONFIG_MMU */

struct kernel_mapping {
	unsigned long page_offset;
	unsigned long virt_addr;
	unsigned long virt_offset;
	uintptr_t phys_addr;
	uintptr_t size;
	/* Offset between linear mapping virtual address and kernel load address */
	unsigned long va_pa_offset;
	/* Offset between kernel mapping virtual address and kernel load address */
#ifdef CONFIG_XIP_KERNEL
	unsigned long va_kernel_xip_text_pa_offset;
	unsigned long va_kernel_xip_data_pa_offset;
	uintptr_t xiprom;
	uintptr_t xiprom_sz;
#else
	unsigned long va_kernel_pa_offset;
#endif
};

extern struct kernel_mapping kernel_map;
extern phys_addr_t phys_ram_base;

#define is_kernel_mapping(x)	\
	((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))

#define is_linear_mapping(x)	\
	((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE))

#ifndef CONFIG_DEBUG_VIRTUAL
#define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
#else
void *linear_mapping_pa_to_va(unsigned long x);
#endif

#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_pa_to_va(y)	({					\
	unsigned long _y = (unsigned long)(y);					\
	(_y < phys_ram_base) ?							\
		(void *)(_y + kernel_map.va_kernel_xip_text_pa_offset) :	\
		(void *)(_y + kernel_map.va_kernel_xip_data_pa_offset);	\
	})
#else
#define kernel_mapping_pa_to_va(y)	((void *)((unsigned long)(y) + kernel_map.va_kernel_pa_offset))
#endif

#define __pa_to_va_nodebug(x)		linear_mapping_pa_to_va(x)

#ifndef CONFIG_DEBUG_VIRTUAL
#define linear_mapping_va_to_pa(x)	((unsigned long)(x) - kernel_map.va_pa_offset)
#else
phys_addr_t linear_mapping_va_to_pa(unsigned long x);
#endif

#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_va_to_pa(y)	({					\
	unsigned long _y = (unsigned long)(y);					\
	(_y < kernel_map.virt_addr + kernel_map.xiprom_sz) ?			\
		(_y - kernel_map.va_kernel_xip_text_pa_offset) :		\
		(_y - kernel_map.va_kernel_xip_data_pa_offset);			\
	})
#else
#define kernel_mapping_va_to_pa(y)	((unsigned long)(y) - kernel_map.va_kernel_pa_offset)
#endif

#define __va_to_pa_nodebug(x)	({						\
	unsigned long _x = x;							\
	is_linear_mapping(_x) ?							\
		linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x);	\
	})

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__va_to_pa_nodebug(x)
#define __phys_addr_symbol(x)	__va_to_pa_nodebug(x)
#endif /* CONFIG_DEBUG_VIRTUAL */

#define __pa_symbol(x)	__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa(x)		__virt_to_phys((unsigned long)(x))
#define __va(x)		((void *)__pa_to_va_nodebug((phys_addr_t)(x)))
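/*
 * Usage sketch, illustrative only: __pa()/__va() translate between
 * kernel virtual and physical addresses. __pa() selects the linear- or
 * kernel-mapping offset via is_linear_mapping(); __va() always assumes
 * a linear-mapped physical address:
 *
 *	void *v = kmalloc(64, GFP_KERNEL);	// linear-mapped virtual address
 *	phys_addr_t p = __pa(v);		// virtual -> physical
 *	WARN_ON(__va(p) != v);			// physical -> virtual round-trips
 */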
#define phys_to_pfn(phys)	(PFN_DOWN(phys))
#define pfn_to_phys(pfn)	(PFN_PHYS(pfn))

#define virt_to_pfn(vaddr)	(phys_to_pfn(__pa(vaddr)))
#define pfn_to_virt(pfn)	(__va(pfn_to_phys(pfn)))

#define virt_to_page(vaddr)	(pfn_to_page(virt_to_pfn(vaddr)))
#define page_to_virt(page)	(pfn_to_virt(page_to_pfn(page)))

#define page_to_phys(page)	(pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(paddr)	(pfn_to_page(phys_to_pfn(paddr)))

#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))

unsigned long kaslr_offset(void);

static __always_inline void *pfn_to_kaddr(unsigned long pfn)
{
	return __va(pfn << PAGE_SHIFT);
}

#endif /* __ASSEMBLY__ */

#define virt_addr_valid(vaddr)	({						\
	unsigned long _addr = (unsigned long)vaddr;				\
	(unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr));	\
})

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _ASM_RISCV_PAGE_H */