xref: /linux/arch/sh/include/asm/page.h (revision 2277ab4a1df50e05bc732fe9488d4e902bb8399a)
#ifndef __ASM_SH_PAGE_H
#define __ASM_SH_PAGE_H

/*
 * Copyright (C) 1999  Niibe Yutaka
 */

#include <linux/const.h>

/* PAGE_SHIFT determines the page size */
#if defined(CONFIG_PAGE_SIZE_4KB)
# define PAGE_SHIFT	12
#elif defined(CONFIG_PAGE_SIZE_8KB)
# define PAGE_SHIFT	13
#elif defined(CONFIG_PAGE_SIZE_16KB)
# define PAGE_SHIFT	14
#elif defined(CONFIG_PAGE_SIZE_64KB)
# define PAGE_SHIFT	16
#else
# error "Bogus kernel page size?"
#endif

#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))
#define PTE_MASK	PAGE_MASK
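
/*
 * Worked example (assuming CONFIG_PAGE_SIZE_4KB on a 32-bit build, so
 * PAGE_SHIFT = 12): PAGE_SIZE = 1UL << 12 = 0x1000 and
 * PAGE_MASK = ~(0x1000 - 1) = 0xfffff000, so "addr & PAGE_MASK" rounds an
 * address down to its page boundary and "addr & ~PAGE_MASK" gives the
 * offset within the page.
 */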

#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HPAGE_SHIFT	16
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
#define HPAGE_SHIFT	18
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define HPAGE_SHIFT	20
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HPAGE_SHIFT	22
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
#define HPAGE_SHIFT	26
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define HPAGE_SHIFT	29
#endif

#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SIZE		(1UL << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT-PAGE_SHIFT)
#endif
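
/*
 * Worked example (assuming CONFIG_HUGETLB_PAGE_SIZE_64K together with
 * CONFIG_PAGE_SIZE_4KB): HPAGE_SHIFT = 16, so HPAGE_SIZE = 0x10000 (64KB),
 * HPAGE_MASK = 0xffff0000, and HUGETLB_PAGE_ORDER = 16 - 12 = 4, i.e. one
 * huge page covers 2^4 = 16 base pages.
 */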

#ifndef __ASSEMBLY__

extern unsigned long shm_align_mask;
extern unsigned long max_low_pfn, min_low_pfn;
extern unsigned long memory_start, memory_end;

static inline unsigned long
pages_do_alias(unsigned long addr1, unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
}
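
/*
 * Illustrative note (the mask value below is an assumption for the example,
 * not taken from this header): with shm_align_mask = 0x3fff (a 16KB alias
 * span), pages_do_alias(0x10001000, 0x20003000) is non-zero because the two
 * addresses differ within the low 14 bits, i.e. the mappings would fall in
 * different cache colours and may alias in a virtually indexed cache.
 */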

extern void clear_page(void *to);
extern void copy_page(void *to, void *from);

#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
	(defined(CONFIG_CPU_SH5) || defined(CONFIG_CPU_SH4) || \
	 defined(CONFIG_SH7705_CACHE_32KB))
struct page;
struct vm_area_struct;
extern void clear_user_page(void *to, unsigned long address, struct page *page);
extern void copy_user_page(void *to, void *from, unsigned long address,
			   struct page *page);
#if defined(CONFIG_CPU_SH4)
extern void copy_user_highpage(struct page *to, struct page *from,
			       unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
#endif
#else
#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
#endif

/*
 * These are used to make use of C type-checking..
 */
#ifdef CONFIG_X2TLB
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
typedef struct { unsigned long long pgd; } pgd_t;
#define pte_val(x) \
	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) \
	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
#elif defined(CONFIG_SUPERH32)
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x)	((x).pte_low)
#define __pte(x)	((pte_t) { (x) } )
#else
typedef struct { unsigned long long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x)	((x).pte_low)
#define __pte(x)	((pte_t) { (x) } )
#endif
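
/*
 * Worked example for the CONFIG_X2TLB case above (the value is chosen purely
 * for illustration): __pte(0x100000077ULL) yields pte_low = 0x00000077 and
 * pte_high = 0x00000001, and pte_val() on that pte_t reassembles the original
 * 64-bit value as 0x00000077 | (0x00000001ULL << 32) = 0x100000077.
 */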

#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

typedef struct page *pgtable_t;

#define pte_pgprot(x) __pgprot(pte_val(x) & PTE_FLAGS_MASK)

#endif /* !__ASSEMBLY__ */

/*
 * __MEMORY_START and __MEMORY_SIZE are the physical start address and
 * size of RAM.
 */
#define __MEMORY_START		CONFIG_MEMORY_START
#define __MEMORY_SIZE		CONFIG_MEMORY_SIZE

/*
 * PAGE_OFFSET is the virtual address of the start of the kernel address
 * space.
 */
#define PAGE_OFFSET		CONFIG_PAGE_OFFSET

/*
 * Virtual to physical RAM address translation.
 *
 * In 29-bit mode, the physical offset of RAM from address 0 is visible in
 * the kernel virtual address space, and thus we don't have to take
 * this into account when translating. However, in 32-bit mode this offset
 * is not visible (it is part of the PMB mapping) and so needs to be
 * added or subtracted as required.
 */
#if defined(CONFIG_PMB_FIXED)
/* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */
#define PMB_OFFSET	(PAGE_OFFSET - PXSEG(__MEMORY_START))
#define __pa(x)	((unsigned long)(x) - PMB_OFFSET)
#define __va(x)	((void *)((unsigned long)(x) + PMB_OFFSET))
#elif defined(CONFIG_32BIT)
#define __pa(x)	((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
#define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
#else
#define __pa(x)	((unsigned long)(x)-PAGE_OFFSET)
#define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET))
#endif
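
/*
 * Worked example for the 29-bit (#else) case above, assuming a typical sh
 * PAGE_OFFSET of 0x80000000 (the actual value comes from CONFIG_PAGE_OFFSET):
 * __pa(0x80001000) = 0x80001000 - 0x80000000 = 0x00001000, and
 * __va(0x00001000) maps back to 0x80001000. In the CONFIG_32BIT case
 * __MEMORY_START is additionally added in __pa() and subtracted in __va(),
 * because the RAM offset is not visible in the virtual address in that mode.
 */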

#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)

/*
 * PFN = physical frame number (i.e. PFN 0 == physical address 0)
 * PFN_START is the PFN of the first page of RAM. By defining this we
 * don't have struct page entries for the portion of address space
 * between physical address 0 and the start of RAM.
 */
#define PFN_START		(__MEMORY_START >> PAGE_SHIFT)
#define ARCH_PFN_OFFSET		(PFN_START)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn)		((pfn) >= min_low_pfn && (pfn) < max_low_pfn)
#endif
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
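
/*
 * Worked example (assuming CONFIG_MEMORY_START = 0x08000000 and 4KB pages,
 * both of which are configuration-dependent): PFN_START = 0x08000000 >> 12 =
 * 0x8000, so the struct page array starts at PFN 0x8000 rather than PFN 0,
 * skipping the unpopulated address space below the start of RAM.
 */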

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

/* vDSO support */
#ifdef CONFIG_VSYSCALL
#define __HAVE_ARCH_GATE_AREA
#endif

/*
 * Some drivers need to perform DMA into kmalloc'ed buffers
 * and so we have to increase the kmalloc minalign for this.
 */
#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES

#ifdef CONFIG_SUPERH64
/*
 * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
 * happily generate {ld/st}.q pairs, requiring us to have 8-byte
 * alignment to avoid traps. The kmalloc alignment is guaranteed by
 * virtue of L1_CACHE_BYTES, requiring this to only be special-cased
 * for slab caches.
 */
#define ARCH_SLAB_MINALIGN	8
#endif

#endif /* __ASM_SH_PAGE_H */