/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_PAGE_H
#define __ASM_SH_PAGE_H

/*
 * Copyright (C) 1999  Niibe Yutaka
 */

#include <linux/const.h>

/* PAGE_SHIFT determines the page size */
#if defined(CONFIG_PAGE_SIZE_4KB)
# define PAGE_SHIFT	12
#elif defined(CONFIG_PAGE_SIZE_8KB)
# define PAGE_SHIFT	13
#elif defined(CONFIG_PAGE_SIZE_16KB)
# define PAGE_SHIFT	14
#elif defined(CONFIG_PAGE_SIZE_64KB)
# define PAGE_SHIFT	16
#else
# error "Bogus kernel page size?"
#endif

#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))
#define PTE_MASK	PAGE_MASK

#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HPAGE_SHIFT	16
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
#define HPAGE_SHIFT	18
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define HPAGE_SHIFT	20
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HPAGE_SHIFT	22
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
#define HPAGE_SHIFT	26
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define HPAGE_SHIFT	29
#endif

#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SIZE		(1UL << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT-PAGE_SHIFT)
#endif

#ifndef __ASSEMBLY__
#include <asm/uncached.h>

extern unsigned long shm_align_mask;
extern unsigned long max_low_pfn, min_low_pfn;
extern unsigned long memory_start, memory_end, memory_limit;

static inline unsigned long
pages_do_alias(unsigned long addr1, unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
}

#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, void *from);
#define copy_user_page(to, from, vaddr, pg)	__copy_user(to, from, PAGE_SIZE)

struct page;
struct vm_area_struct;

extern void copy_user_highpage(struct page *to, struct page *from,
			       unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
extern void clear_user_highpage(struct page *page, unsigned long vaddr);
#define clear_user_highpage	clear_user_highpage

/*
 * These are used to make use of C type-checking.
 */
#ifdef CONFIG_X2TLB
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
typedef struct { unsigned long long pgd; } pgd_t;
#define pte_val(x) \
	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) \
	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
#elif defined(CONFIG_SUPERH32)
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x)	((x).pte_low)
#define __pte(x)	((pte_t) { (x) } )
#else
typedef struct { unsigned long long pte_low; } pte_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x)	((x).pte_low)
#define __pte(x)	((pte_t) { (x) } )
#endif

#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

typedef struct page *pgtable_t;

#define pte_pgprot(x)	__pgprot(pte_val(x) & PTE_FLAGS_MASK)

#endif /* !__ASSEMBLY__ */

/*
 * __MEMORY_START and __MEMORY_SIZE are the physical base address and
 * size of RAM.
 */
#define __MEMORY_START		CONFIG_MEMORY_START
#define __MEMORY_SIZE		CONFIG_MEMORY_SIZE

/*
 * PHYSICAL_OFFSET is the offset in physical memory where the base
 * of the kernel is loaded.
 */
#ifdef CONFIG_PHYSICAL_START
#define PHYSICAL_OFFSET	(CONFIG_PHYSICAL_START - __MEMORY_START)
#else
#define PHYSICAL_OFFSET	0
#endif

/*
 * PAGE_OFFSET is the virtual address of the start of kernel address
 * space.
 */
#define PAGE_OFFSET		CONFIG_PAGE_OFFSET

/*
 * Virtual to physical RAM address translation.
 *
 * In 29-bit mode, the physical offset of RAM from address 0 is visible in
 * the kernel virtual address space, and thus we don't have to take it
 * into account when translating. However, in 32-bit mode this offset is
 * not visible (it is part of the PMB mapping) and so needs to be added
 * or subtracted as required.
 */
#ifdef CONFIG_PMB
#define ___pa(x)	((x)-PAGE_OFFSET+__MEMORY_START)
#define ___va(x)	((x)+PAGE_OFFSET-__MEMORY_START)
#else
#define ___pa(x)	((x)-PAGE_OFFSET)
#define ___va(x)	((x)+PAGE_OFFSET)
#endif

#ifndef __ASSEMBLY__
#define __pa(x)	___pa((unsigned long)x)
#define __va(x)	(void *)___va((unsigned long)x)
#endif /* !__ASSEMBLY__ */
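/*
 * Worked example (illustrative sketch, not part of the original header;
 * the concrete values below are hypothetical and config-dependent):
 * assume CONFIG_PMB with PAGE_OFFSET == 0x80000000 and
 * __MEMORY_START == 0x08000000. For the kernel virtual address
 * 0x80001000:
 *
 *	__pa(0x80001000) == 0x80001000 - 0x80000000 + 0x08000000
 *			 == 0x08001000
 *	__va(0x08001000) == (void *)(0x08001000 + 0x80000000 - 0x08000000)
 *			 == (void *)0x80001000
 *
 * so __va(__pa(x)) == x for any lowmem address. Without CONFIG_PMB
 * (29-bit mode) the __MEMORY_START term drops out, since the offset of
 * RAM is already visible in the kernel virtual address space.
 */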
#ifdef CONFIG_UNCACHED_MAPPING
#if defined(CONFIG_29BIT)
#define UNCAC_ADDR(addr)	P2SEGADDR(addr)
#define CAC_ADDR(addr)		P1SEGADDR(addr)
#else
#define UNCAC_ADDR(addr)	((addr) - PAGE_OFFSET + uncached_start)
#define CAC_ADDR(addr)		((addr) - uncached_start + PAGE_OFFSET)
#endif
#else
#define UNCAC_ADDR(addr)	((addr))
#define CAC_ADDR(addr)		((addr))
#endif

#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)

/*
 * PFN = physical frame number (i.e. PFN 0 == physical address 0).
 * PFN_START is the PFN of the first page of RAM. By defining this we
 * don't have struct page entries for the portion of address space
 * between physical address 0 and the start of RAM. (A worked example
 * appears at the end of this file.)
 */
#define PFN_START		(__MEMORY_START >> PAGE_SHIFT)
#define ARCH_PFN_OFFSET		(PFN_START)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn)		((pfn) >= min_low_pfn && (pfn) < max_low_pfn)
#endif
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

/*
 * Some drivers need to perform DMA into kmalloc'ed buffers
 * and so we have to increase the kmalloc minalign for this.
 */
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES

#ifdef CONFIG_SUPERH64
/*
 * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
 * happily generate {ld/st}.q pairs, requiring us to have 8-byte
 * alignment to avoid traps. The kmalloc alignment is guaranteed by
 * virtue of L1_CACHE_BYTES, requiring this to only be special cased
 * for slab caches.
 */
#define ARCH_SLAB_MINALIGN	8
#endif

#endif /* __ASM_SH_PAGE_H */
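/*
 * Worked example for the PFN macros above (illustrative sketch, not
 * part of the original header; the concrete values are hypothetical):
 * assume CONFIG_PMB, __MEMORY_START == 0x08000000,
 * PAGE_OFFSET == 0x80000000 and 4KB pages (PAGE_SHIFT == 12). Then:
 *
 *	PFN_START == 0x08000000 >> 12 == 0x8000
 *
 * so with ARCH_PFN_OFFSET == PFN_START the flatmem mem_map[] begins at
 * PFN 0x8000 rather than PFN 0, avoiding 0x8000 wasted struct page
 * entries for the unpopulated space below RAM. The macros remain
 * mutually consistent:
 *
 *	pfn_to_kaddr(0x8000)     == __va(0x8000 << 12)
 *	                         == (void *)0x80000000
 *	virt_to_page(0x80000000) == pfn_to_page(0x08000000 >> 12)
 *	                         == pfn_to_page(PFN_START) -> &mem_map[0]
 */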