xref: /linux/arch/xtensa/include/asm/page.h (revision 72bea132f3680ee51e7ed2cee62892b6f5121909)
/*
 * include/asm-xtensa/page.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_PAGE_H
#define _XTENSA_PAGE_H

#include <linux/const.h>

#include <asm/processor.h>
#include <asm/types.h>
#include <asm/cache.h>
#include <asm/kmem_layout.h>

/*
 * PAGE_SHIFT determines the page size
 */

#define PAGE_SHIFT	CONFIG_PAGE_SHIFT
#define PAGE_SIZE	(__XTENSA_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))
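
/*
 * Worked example, assuming the common CONFIG_PAGE_SHIFT == 12, i.e.
 * 4 KiB pages (the value is configuration dependent):
 *   PAGE_SIZE = 1 << 12       = 0x1000
 *   PAGE_MASK = ~(0x1000 - 1) = 0xfffff000
 * so (addr & PAGE_MASK) is the page base of addr and (addr & ~PAGE_MASK)
 * is the offset within the page.
 */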

#ifdef CONFIG_MMU
#define PAGE_OFFSET	XCHAL_KSEG_CACHED_VADDR
#define PHYS_OFFSET	XCHAL_KSEG_PADDR
#define MAX_LOW_PFN	(PHYS_PFN(XCHAL_KSEG_PADDR) + \
			 PHYS_PFN(XCHAL_KSEG_SIZE))
#else
#define PAGE_OFFSET	_AC(CONFIG_DEFAULT_MEM_START, UL)
#define PHYS_OFFSET	_AC(CONFIG_DEFAULT_MEM_START, UL)
#define MAX_LOW_PFN	PHYS_PFN(0xfffffffful)
#endif

/*
 * Cache aliasing:
 *
 * If the cache size for one way is greater than the page size, we have to
 * deal with cache aliasing. The cache index is wider than the page size:
 *
 * |    |cache| cache index
 * | pfn  |off|	virtual address
 * |xxxx:X|zzz|
 * |    : |   |
 * | \  / |   |
 * |trans.|   |
 * | /  \ |   |
 * |yyyy:Y|zzz|	physical address
 *
 * When the page number is translated to the physical page address, the lowest
 * bit(s) (X) that are part of the cache index are also translated (Y).
 * If this translation changes bit(s) (X), the cache index is also affected,
 * thus resulting in a different cache line than before.
 * The kernel does not provide a mechanism to ensure that the page color
 * (represented by these bits) remains the same when pages are allocated
 * or remapped. When user pages are mapped into kernel space, the color of
 * the page might also change.
 *
 * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
 * to temporarily map a page so we can match the color.
 */

#if DCACHE_WAY_SIZE > PAGE_SIZE
# define DCACHE_ALIAS_ORDER	(DCACHE_WAY_SHIFT - PAGE_SHIFT)
# define DCACHE_ALIAS_MASK	(PAGE_MASK & (DCACHE_WAY_SIZE - 1))
# define DCACHE_ALIAS(a)	(((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define DCACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
#else
# define DCACHE_ALIAS_ORDER	0
# define DCACHE_ALIAS(a)	((void)(a), 0)
#endif
#define DCACHE_N_COLORS		(1 << DCACHE_ALIAS_ORDER)
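
/*
 * Worked example, assuming 4 KiB pages (PAGE_SHIFT == 12) and a 16 KiB
 * data-cache way (DCACHE_WAY_SIZE == 0x4000, DCACHE_WAY_SHIFT == 14):
 *   DCACHE_ALIAS_ORDER = 14 - 12 = 2, so DCACHE_N_COLORS = 4
 *   DCACHE_ALIAS_MASK  = 0xfffff000 & 0x3fff = 0x3000
 *   DCACHE_ALIAS(0x12345000) = (0x12345000 & 0x3000) >> 12 = 1
 *   DCACHE_ALIAS(0x12346000) = (0x12346000 & 0x3000) >> 12 = 2
 * Two mappings of the same physical page hit the same cache lines only
 * if DCACHE_ALIAS_EQ() holds for their virtual addresses.
 */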

#if ICACHE_WAY_SIZE > PAGE_SIZE
# define ICACHE_ALIAS_ORDER	(ICACHE_WAY_SHIFT - PAGE_SHIFT)
# define ICACHE_ALIAS_MASK	(PAGE_MASK & (ICACHE_WAY_SIZE - 1))
# define ICACHE_ALIAS(a)	(((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define ICACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
#else
# define ICACHE_ALIAS_ORDER	0
#endif


#ifdef __ASSEMBLY__

#define __pgprot(x)	(x)

#else

/*
 * These are used to make use of C type-checking.
 */

typedef struct { unsigned long pte; } pte_t;		/* page table entry */
typedef struct { unsigned long pgd; } pgd_t;		/* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )
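
/*
 * The single-member structs make pte_t, pgd_t and pgprot_t distinct C
 * types, so mixing them up fails at compile time rather than silently.
 * Illustrative (hypothetical) values:
 *
 *   pte_t pte = __pte(0x1007);          constructs a pte_t explicitly
 *   unsigned long raw = pte_val(pte);   extracts the raw word
 *   pgd_val(pte);                       does not compile: wrong type
 */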

/*
 * Pure 2^n version of get_order.
 * Uses the 'nsau' instruction if the processor supports it, otherwise
 * falls back to the generic version.
 */

#if XCHAL_HAVE_NSA

static inline __attribute_const__ int get_order(unsigned long size)
{
	int lz;
	asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
	return 32 - lz;
}

#else

# include <asm-generic/getorder.h>

#endif
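
/*
 * Example, assuming 4 KiB pages: get_order() returns the smallest n
 * with (PAGE_SIZE << n) >= size:
 *   get_order(0x1000): nsau(0x0) = 32 -> order 0  (one page)
 *   get_order(0x2000): nsau(0x1) = 31 -> order 1  (two pages)
 *   get_order(0x2001): nsau(0x2) = 30 -> order 2  (rounds up)
 * NSAU of a zero operand is 32, which yields order 0 as required.
 */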

struct page;
struct vm_area_struct;
extern void clear_page(void *page);
extern void copy_page(void *to, void *from);

/*
 * If we have cache aliasing and writeback caches, we might have to do
 * some extra work.
 */

#if defined(CONFIG_MMU) && DCACHE_WAY_SIZE > PAGE_SIZE
extern void clear_page_alias(void *vaddr, unsigned long paddr);
extern void copy_page_alias(void *to, void *from,
			    unsigned long to_paddr, unsigned long from_paddr);

#define clear_user_highpage clear_user_highpage
void clear_user_highpage(struct page *page, unsigned long vaddr);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma);
#else
# define clear_user_page(page, vaddr, pg)	clear_page(page)
# define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
#endif
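
/*
 * With an aliasing D-cache, the *_alias() and *_user_highpage() variants
 * additionally take the physical address or the user virtual address,
 * so the implementation can touch the page through a kernel mapping of
 * the matching color (see the temporary mapping area above VMALLOC_END
 * noted earlier). Without aliasing, plain clear_page()/copy_page()
 * suffice.
 */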

/*
 * This handles the memory map.  We handle pages at
 * XCHAL_KSEG_CACHED_VADDR for kernels with a 32-bit address space.
 * These macros convert kernel addresses, not user addresses.
 */

#define ARCH_PFN_OFFSET		(PHYS_OFFSET >> PAGE_SHIFT)

#ifdef CONFIG_MMU
static inline unsigned long ___pa(unsigned long va)
{
	unsigned long off = va - PAGE_OFFSET;

	if (off >= XCHAL_KSEG_SIZE)
		off -= XCHAL_KSEG_SIZE;

#ifndef CONFIG_XIP_KERNEL
	return off + PHYS_OFFSET;
#else
	if (off < XCHAL_KSEG_SIZE)
		return off + PHYS_OFFSET;

	off -= XCHAL_KSEG_SIZE;
	if (off >= XCHAL_KIO_SIZE)
		off -= XCHAL_KIO_SIZE;

	return off + XCHAL_KIO_PADDR;
#endif
}
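
/*
 * Worked example for the non-XIP MMU case, assuming the common KSEG
 * layout (XCHAL_KSEG_CACHED_VADDR == 0xd0000000, XCHAL_KSEG_SIZE ==
 * 0x08000000, XCHAL_KSEG_PADDR == 0):
 *   __pa(0xd0001000): off = 0x00001000              -> pa 0x00001000
 *   __pa(0xd8001000): off = 0x08001000 >= KSEG_SIZE, the uncached KSEG
 *                     mirror is folded back         -> pa 0x00001000
 * Both the cached and the uncached alias of a page translate to the
 * same physical address.
 */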
#define __pa(x)	___pa((unsigned long)(x))
#else
#define __pa(x)	\
	((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
#endif
#define __va(x)	\
	((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
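
/*
 * Example, under the same assumed layout as above (PHYS_OFFSET == 0,
 * PAGE_OFFSET == 0xd0000000, 4 KiB pages):
 *   __pa(0xd0002000) == 0x2000, i.e. pfn 2;
 *   virt_to_page(0xd0002000) yields the struct page for pfn 2, and
 *   page_to_virt() of that page gives back 0xd0002000.
 */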

#endif /* __ASSEMBLY__ */

#include <asm-generic/memory_model.h>
#endif /* _XTENSA_PAGE_H */