/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2017 XiaojingZhu <zhuxiaoj@ict.ac.cn>
 */

#ifndef _ASM_RISCV_PAGE_H
#define _ASM_RISCV_PAGE_H

#include <linux/pfn.h>
#include <linux/const.h>

#include <vdso/page.h>

#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

/*
 * PAGE_OFFSET -- the first address of the first page of memory.
 * When not using MMU this corresponds to the first free page in
 * physical memory (aligned on a page boundary).
 */
#ifdef CONFIG_MMU
#ifdef CONFIG_64BIT
#define PAGE_OFFSET_L5	_AC(0xff60000000000000, UL)
#define PAGE_OFFSET_L4	_AC(0xffffaf8000000000, UL)
#define PAGE_OFFSET_L3	_AC(0xffffffd600000000, UL)
#ifdef CONFIG_XIP_KERNEL
#define PAGE_OFFSET		PAGE_OFFSET_L3
#else
#define PAGE_OFFSET		kernel_map.page_offset
#endif /* CONFIG_XIP_KERNEL */
#else
#define PAGE_OFFSET		_AC(0xc0000000, UL)
#endif /* CONFIG_64BIT */
#else
#define PAGE_OFFSET		((unsigned long)phys_ram_base)
#endif /* CONFIG_MMU */

#ifndef __ASSEMBLER__

#ifdef CONFIG_RISCV_ISA_ZICBOZ
void clear_page(void *page);
#else
#define clear_page(pgaddr)	memset((pgaddr), 0, PAGE_SIZE)
#endif
#define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)

#define copy_user_page(vto, vfrom, vaddr, topg) \
	memcpy((vto), (vfrom), PAGE_SIZE)

/*
 * Use struct definitions to apply C type checking
 */

/* Page Global Directory entry */
typedef struct {
	unsigned long pgd;
} pgd_t;

/* Page Table entry */
typedef struct {
	unsigned long pte;
} pte_t;

typedef struct {
	unsigned long pgprot;
} pgprot_t;

typedef struct page *pgtable_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) })
#define __pgd(x)	((pgd_t) { (x) })
#define __pgprot(x)	((pgprot_t) { (x) })

#ifdef CONFIG_64BIT
#define PTE_FMT "%016lx"
#else
#define PTE_FMT "%08lx"
#endif
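
/*
 * Illustrative sketch, not part of this header: because pte_t, pgd_t and
 * pgprot_t are distinct struct types rather than bare unsigned longs,
 * mixing them up is a compile-time error instead of a silent bug.
 * Hypothetical usage:
 *
 *	pte_t pte = __pte(0);			// wrap a raw value
 *	unsigned long raw = pte_val(pte);	// unwrap it again
 *	pr_debug("pte: " PTE_FMT "\n", raw);	// PTE_FMT matches word size
 *
 * Passing a pte_t where pgd_val() expects a pgd_t will not compile,
 * which is the point of the struct definitions above.
 */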

#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
/*
 * We override this value as its generic definition uses __pa too early in
 * the boot process (before kernel_map.va_pa_offset is set).
 */
#define MIN_MEMBLOCK_ADDR	0
#endif

#define ARCH_PFN_OFFSET		(PFN_DOWN((unsigned long)phys_ram_base))

struct kernel_mapping {
	unsigned long virt_addr;
	unsigned long virt_offset;
	uintptr_t phys_addr;
	uintptr_t size;
	/* Offset between linear mapping virtual address and kernel load address */
	unsigned long va_pa_offset;
	/* Offset between kernel mapping virtual address and kernel load address */
#ifdef CONFIG_XIP_KERNEL
	unsigned long va_kernel_xip_text_pa_offset;
	unsigned long va_kernel_xip_data_pa_offset;
	uintptr_t xiprom;
	uintptr_t xiprom_sz;
#else
	unsigned long page_offset;
	unsigned long va_kernel_pa_offset;
#endif
};

extern struct kernel_mapping kernel_map;
extern phys_addr_t phys_ram_base;
extern unsigned long vmemmap_start_pfn;

#define is_kernel_mapping(x)	\
	((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))

#define is_linear_mapping(x)	\
	((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE))

#ifndef CONFIG_DEBUG_VIRTUAL
#define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
#else
void *linear_mapping_pa_to_va(unsigned long x);
#endif

#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_pa_to_va(y)	({					\
	unsigned long _y = (unsigned long)(y);					\
	(_y < phys_ram_base) ?							\
		(void *)(_y + kernel_map.va_kernel_xip_text_pa_offset) :	\
		(void *)(_y + kernel_map.va_kernel_xip_data_pa_offset);	\
	})
#else
#define kernel_mapping_pa_to_va(y)	((void *)((unsigned long)(y) + kernel_map.va_kernel_pa_offset))
#endif

#define __pa_to_va_nodebug(x)		linear_mapping_pa_to_va(x)

#ifndef CONFIG_DEBUG_VIRTUAL
#define linear_mapping_va_to_pa(x)	((unsigned long)(x) - kernel_map.va_pa_offset)
#else
phys_addr_t linear_mapping_va_to_pa(unsigned long x);
#endif

#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_va_to_pa(y)	({					\
	unsigned long _y = (unsigned long)(y);					\
	(_y < kernel_map.virt_addr + kernel_map.xiprom_sz) ?			\
		(_y - kernel_map.va_kernel_xip_text_pa_offset) :		\
		(_y - kernel_map.va_kernel_xip_data_pa_offset);			\
	})
#else
#define kernel_mapping_va_to_pa(y)	((unsigned long)(y) - kernel_map.va_kernel_pa_offset)
#endif

#define __va_to_pa_nodebug(x)	({						\
	unsigned long _x = x;							\
	is_linear_mapping(_x) ?							\
		linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x);	\
	})
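
/*
 * Illustrative sketch of the dispatch above (hypothetical caller, not
 * provided by this header): the kernel image and the linear map are two
 * distinct virtual views of RAM with different offsets, so a VA must be
 * classified before it can be translated:
 *
 *	unsigned long va = (unsigned long)ptr;		// ptr is hypothetical
 *	phys_addr_t pa;
 *
 *	if (is_linear_mapping(va))
 *		pa = linear_mapping_va_to_pa(va);	// va - va_pa_offset
 *	else
 *		pa = kernel_mapping_va_to_pa(va);	// kernel-image offset
 *
 * This is exactly the ?: dispatch that __va_to_pa_nodebug() performs.
 */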

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__va_to_pa_nodebug(x)
#define __phys_addr_symbol(x)	__va_to_pa_nodebug(x)
#endif /* CONFIG_DEBUG_VIRTUAL */

#define __pa_symbol(x)	__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa(x)		__virt_to_phys((unsigned long)(x))
#define __va(x)		((void *)__pa_to_va_nodebug((phys_addr_t)(x)))

#define phys_to_pfn(phys)	(PFN_DOWN(phys))
#define pfn_to_phys(pfn)	(PFN_PHYS(pfn))

#define virt_to_pfn(vaddr)	(phys_to_pfn(__pa(vaddr)))
#define pfn_to_virt(pfn)	(__va(pfn_to_phys(pfn)))

#define virt_to_page(vaddr)	(pfn_to_page(virt_to_pfn(vaddr)))
#define page_to_virt(page)	(pfn_to_virt(page_to_pfn(page)))

#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))

unsigned long kaslr_offset(void);

static __always_inline void *pfn_to_kaddr(unsigned long pfn)
{
	return __va(pfn << PAGE_SHIFT);
}

#endif /* __ASSEMBLER__ */

#define virt_addr_valid(vaddr)	({						\
	unsigned long _addr = (unsigned long)vaddr;				\
	(unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
})

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _ASM_RISCV_PAGE_H */
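
/*
 * Illustrative usage sketch (hypothetical, for documentation only):
 * __pa()/__va() round-trip a linear-map address, and virt_to_page()
 * composes the same translation with the memory model's pfn_to_page():
 *
 *	void *obj = kmalloc(64, GFP_KERNEL);	// linear-map address
 *	phys_addr_t pa = __pa(obj);		// VA -> PA
 *	struct page *pg = virt_to_page(obj);	// VA -> PFN -> struct page
 *	WARN_ON(__va(pa) != obj);		// PA -> VA round-trip
 *
 * The macros are real; the surrounding snippet is only a sketch and is
 * not something this header provides.
 */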