/*
 * include/asm-xtensa/page.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_PAGE_H
#define _XTENSA_PAGE_H

#include <asm/processor.h>
#include <asm/types.h>
#include <asm/cache.h>
#include <asm/kmem_layout.h>

/*
 * PAGE_SHIFT determines the page size
 */

#define PAGE_SHIFT	12
#define PAGE_SIZE	(__XTENSA_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))

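/*
 * Illustrative example (not part of the original header): with
 * PAGE_SHIFT == 12, PAGE_SIZE is 0x1000 and PAGE_MASK is 0xfffff000 on
 * this 32-bit architecture, so a single AND yields the page base or the
 * offset within the page:
 *
 *	0xd0003abc & PAGE_MASK	== 0xd0003000	(page base)
 *	0xd0003abc & ~PAGE_MASK	== 0x00000abc	(offset in page)
 */
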
#ifdef CONFIG_MMU
#define PAGE_OFFSET	XCHAL_KSEG_CACHED_VADDR
#define PHYS_OFFSET	XCHAL_KSEG_PADDR
#define MAX_LOW_PFN	(PHYS_PFN(XCHAL_KSEG_PADDR) + \
			 PHYS_PFN(XCHAL_KSEG_SIZE))
#else
#define PAGE_OFFSET	_AC(CONFIG_DEFAULT_MEM_START, UL)
#define PHYS_OFFSET	_AC(CONFIG_DEFAULT_MEM_START, UL)
#define MAX_LOW_PFN	PHYS_PFN(0xfffffffful)
#endif

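/*
 * Worked example (hypothetical values; XCHAL_KSEG_PADDR and
 * XCHAL_KSEG_SIZE are configuration-dependent): with KSEG starting at
 * physical 0 and spanning 0x08000000 (128 MiB), PHYS_PFN() shifts right
 * by PAGE_SHIFT, so MAX_LOW_PFN == 0 + (0x08000000 >> 12) == 0x8000,
 * the first page frame number past low memory.
 */
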
/*
 * Cache aliasing:
 *
 * If the cache size for one way is greater than the page size, we have to
 * deal with cache aliasing. The cache index is wider than the page size:
 *
 * |    |cache| cache index
 * | pfn  |off|	virtual address
 * |xxxx:X|zzz|
 * |    : |   |
 * | \  / |   |
 * |trans.|   |
 * | /  \ |   |
 * |yyyy:Y|zzz|	physical address
 *
 * When the page number is translated to the physical page address, the lowest
 * bit(s) (X) that are part of the cache index are also translated (Y).
 * If this translation changes bit(s) (X), the cache index is also affected,
 * thus resulting in a different cache line than before.
 * The kernel does not provide a mechanism to ensure that the page color
 * (represented by this bit) remains the same when allocated or when pages
 * are remapped. When user pages are mapped into kernel space, the color of
 * the page might also change.
 *
 * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
 * to temporarily map a page so we can match the color.
 */

#if DCACHE_WAY_SIZE > PAGE_SIZE
# define DCACHE_ALIAS_ORDER	(DCACHE_WAY_SHIFT - PAGE_SHIFT)
# define DCACHE_ALIAS_MASK	(PAGE_MASK & (DCACHE_WAY_SIZE - 1))
# define DCACHE_ALIAS(a)	(((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define DCACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
#else
# define DCACHE_ALIAS_ORDER	0
# define DCACHE_ALIAS(a)	((void)(a), 0)
#endif
#define DCACHE_N_COLORS		(1 << DCACHE_ALIAS_ORDER)

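/*
 * Worked example (assuming DCACHE_WAY_SIZE == 8 KiB and 4 KiB pages):
 * DCACHE_ALIAS_ORDER == 1, DCACHE_N_COLORS == 2 and DCACHE_ALIAS_MASK
 * == 0x1000, so the page color is just bit 12 of the address:
 *
 *	DCACHE_ALIAS(0x2000) == 0		DCACHE_ALIAS(0x3000) == 1
 *	DCACHE_ALIAS_EQ(0x1000, 0x3000) != 0	(both color 1)
 *	DCACHE_ALIAS_EQ(0x1000, 0x2000) == 0	(colors 1 and 0)
 */
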
#if ICACHE_WAY_SIZE > PAGE_SIZE
# define ICACHE_ALIAS_ORDER	(ICACHE_WAY_SHIFT - PAGE_SHIFT)
# define ICACHE_ALIAS_MASK	(PAGE_MASK & (ICACHE_WAY_SIZE - 1))
# define ICACHE_ALIAS(a)	(((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define ICACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
#else
# define ICACHE_ALIAS_ORDER	0
#endif


#ifdef __ASSEMBLY__

#define __pgprot(x)	(x)

#else

/*
 * These are used to make use of C type-checking: pte_t, pgd_t and
 * pgprot_t are distinct struct types, so page-table values cannot be
 * mixed up or silently converted to plain integers.
 */

typedef struct { unsigned long pte; } pte_t;		/* page table entry */
typedef struct { unsigned long pgd; } pgd_t;		/* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

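/*
 * Illustrative example of what these wrappers buy: pte_t and pgd_t are
 * distinct struct types, so mixing them up is a compile-time error
 * rather than a silent integer conversion:
 *
 *	pte_t pte = __pte(0x1000);
 *	unsigned long raw = pte_val(pte);	(explicit unwrap)
 *	pgd_t pgd = pte;			(rejected by the compiler)
 */
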
/*
 * Pure 2^n version of get_order
 * Uses the 'nsau' instruction when the processor supports it; otherwise
 * falls back to the generic version.
 */

#if XCHAL_HAVE_NSA

static inline __attribute_const__ int get_order(unsigned long size)
{
	int lz;
	asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
	return 32 - lz;
}

#else

# include <asm-generic/getorder.h>

#endif

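/*
 * A portable sketch of the same computation (illustrative only;
 * get_order_sketch is a hypothetical name, and unsigned long is 32 bits
 * wide here as on xtensa). nsau returns 32 for an input of 0, which the
 * sketch must special-case because __builtin_clzl(0) is undefined:
 *
 *	static inline int get_order_sketch(unsigned long size)
 *	{
 *		unsigned long n = (size - 1) >> PAGE_SHIFT;
 *
 *		return n ? 32 - __builtin_clzl(n) : 0;
 *	}
 *
 * e.g. get_order(PAGE_SIZE) == 0, get_order(2 * PAGE_SIZE) == 1,
 * get_order(2 * PAGE_SIZE + 1) == 2.
 */
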
struct page;
struct vm_area_struct;
extern void clear_page(void *page);
extern void copy_page(void *to, void *from);

/*
 * If we have cache aliasing and writeback caches, we might have to do
 * some extra work.
 */

#if defined(CONFIG_MMU) && DCACHE_WAY_SIZE > PAGE_SIZE
extern void clear_page_alias(void *vaddr, unsigned long paddr);
extern void copy_page_alias(void *to, void *from,
			    unsigned long to_paddr, unsigned long from_paddr);

#define clear_user_highpage clear_user_highpage
void clear_user_highpage(struct page *page, unsigned long vaddr);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma);
#else
# define clear_user_page(page, vaddr, pg)	clear_page(page)
# define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
#endif
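
/*
 * Why the alias-aware helpers matter (worked example, assuming
 * DCACHE_N_COLORS == 2 and a cached KSEG base of 0xd0000000): a physical
 * page mapped at user address 0x1000 (color 1) may be mapped by the
 * kernel at 0xd0002000 (color 0), so the two mappings index different
 * cache lines:
 *
 *	DCACHE_ALIAS_EQ(0x1000, 0xd0002000) == 0
 *
 * A plain copy_page() through the kernel mapping could then leave stale
 * lines visible through the user mapping; copy_user_highpage() instead
 * works through a temporary mapping of matching color, as described in
 * the cache aliasing comment above.
 */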

/*
 * This handles the memory map.  We handle pages at
 * XCHAL_KSEG_CACHED_VADDR for kernels with a 32-bit address space.
 * These macros convert kernel addresses, not user addresses.
 */

#define ARCH_PFN_OFFSET		(PHYS_OFFSET >> PAGE_SHIFT)

#ifdef CONFIG_MMU
static inline unsigned long ___pa(unsigned long va)
{
	unsigned long off = va - PAGE_OFFSET;

	/* Fold the uncached (bypass) KSEG mirror onto the cached one */
	if (off >= XCHAL_KSEG_SIZE)
		off -= XCHAL_KSEG_SIZE;

#ifndef CONFIG_XIP_KERNEL
	return off + PHYS_OFFSET;
#else
	if (off < XCHAL_KSEG_SIZE)
		return off + PHYS_OFFSET;

	/* Past both KSEG mappings: translate into the KIO range, where
	 * the XIP kernel resides; fold its mirror likewise. */
	off -= XCHAL_KSEG_SIZE;
	if (off >= XCHAL_KIO_SIZE)
		off -= XCHAL_KIO_SIZE;

	return off + XCHAL_KIO_PADDR;
#endif
}
#define __pa(x)	___pa((unsigned long)(x))
#else
#define __pa(x)	\
	((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
#endif
#define __va(x)	\
	((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
#define pfn_valid(pfn) \
	((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
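
/*
 * Worked round trip (hypothetical but typical MMU values: PAGE_OFFSET ==
 * 0xd0000000, PHYS_OFFSET == 0, XCHAL_KSEG_SIZE == 0x08000000):
 *
 *	__pa(0xd0003000)	== 0x00003000
 *	__pa(0xd8003000)	== 0x00003000	(uncached KSEG mirror
 *						 folds onto the same page)
 *	__va(0x00003000)	== (void *)0xd0003000
 *	virt_to_page(0xd0003000) == pfn_to_page(0x3)
 */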

#endif /* __ASSEMBLY__ */

#include <asm-generic/memory_model.h>
#endif /* _XTENSA_PAGE_H */