pgtable_32.c (56f3c1413f5cce0c8f4d6f1ab79d790da5aa61af) pgtable_32.c (c766ee72235d09b0080f77474085fc17d6ae2fb1)
/*
 * This file contains the routines setting up the linux page tables.
 * -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)

--- 62 unchanged lines hidden (view full) ---

71 return NULL;
72 }
73 return ptepage;
74}
75
76void __iomem *
77ioremap(phys_addr_t addr, unsigned long size)
78{
1/*
2 * This file contains the routines setting up the linux page tables.
3 * -- paulus
4 *
5 * Derived from arch/ppc/mm/init.c:
6 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
7 *
8 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)

--- 62 unchanged lines hidden (view full) ---

71 return NULL;
72 }
73 return ptepage;
74}
75
76void __iomem *
77ioremap(phys_addr_t addr, unsigned long size)
78{
79 unsigned long flags = pgprot_val(pgprot_noncached(PAGE_KERNEL));
79 pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
80
80
81 return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
81 return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
82}
83EXPORT_SYMBOL(ioremap);
84
85void __iomem *
86ioremap_wc(phys_addr_t addr, unsigned long size)
87{
82}
83EXPORT_SYMBOL(ioremap);
84
85void __iomem *
86ioremap_wc(phys_addr_t addr, unsigned long size)
87{
88 unsigned long flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL));
88 pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
89
89
90 return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
90 return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
91}
92EXPORT_SYMBOL(ioremap_wc);
93
94void __iomem *
95ioremap_wt(phys_addr_t addr, unsigned long size)
96{
91}
92EXPORT_SYMBOL(ioremap_wc);
93
94void __iomem *
95ioremap_wt(phys_addr_t addr, unsigned long size)
96{
97 unsigned long flags = pgprot_val(pgprot_cached_wthru(PAGE_KERNEL));
97 pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL);
98
98
99 return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
99 return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
100}
101EXPORT_SYMBOL(ioremap_wt);
102
103void __iomem *
104ioremap_coherent(phys_addr_t addr, unsigned long size)
105{
100}
101EXPORT_SYMBOL(ioremap_wt);
102
103void __iomem *
104ioremap_coherent(phys_addr_t addr, unsigned long size)
105{
106 unsigned long flags = pgprot_val(pgprot_cached(PAGE_KERNEL));
106 pgprot_t prot = pgprot_cached(PAGE_KERNEL);
107
107
108 return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
108 return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
109}
110EXPORT_SYMBOL(ioremap_coherent);
111
112void __iomem *
113ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
114{
115 /* writeable implies dirty for kernel addresses */
116 if ((flags & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO)
117 flags |= _PAGE_DIRTY | _PAGE_HWWRITE;
118
119 /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
120 flags &= ~(_PAGE_USER | _PAGE_EXEC);
121 flags |= _PAGE_PRIVILEGED;
122
109}
110EXPORT_SYMBOL(ioremap_coherent);
111
112void __iomem *
113ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
114{
115 /* writeable implies dirty for kernel addresses */
116 if ((flags & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO)
117 flags |= _PAGE_DIRTY | _PAGE_HWWRITE;
118
119 /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
120 flags &= ~(_PAGE_USER | _PAGE_EXEC);
121 flags |= _PAGE_PRIVILEGED;
122
123 return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
123 return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0));
124}
125EXPORT_SYMBOL(ioremap_prot);
126
127void __iomem *
128__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
129{
124}
125EXPORT_SYMBOL(ioremap_prot);
126
127void __iomem *
128__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
129{
130 return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
130 return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0));
131}
132
133void __iomem *
131}
132
133void __iomem *
134__ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
135 void *caller)
134__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
136{
137 unsigned long v, i;
138 phys_addr_t p;
139 int err;
140
141 /*
142 * Choose an address to map it to.
143 * Once the vmalloc system is running, we use it.

--- 46 unchanged lines hidden (view full) ---

190 }
191
192 /*
193 * Should check if it is a candidate for a BAT mapping
194 */
195
196 err = 0;
197 for (i = 0; i < size && err == 0; i += PAGE_SIZE)
135{
136 unsigned long v, i;
137 phys_addr_t p;
138 int err;
139
140 /*
141 * Choose an address to map it to.
142 * Once the vmalloc system is running, we use it.

--- 46 unchanged lines hidden (view full) ---

189 }
190
191 /*
192 * Should check if it is a candidate for a BAT mapping
193 */
194
195 err = 0;
196 for (i = 0; i < size && err == 0; i += PAGE_SIZE)
198 err = map_kernel_page(v+i, p+i, flags);
197 err = map_kernel_page(v + i, p + i, prot);
199 if (err) {
200 if (slab_is_available())
201 vunmap((void *)v);
202 return NULL;
203 }
204
205out:
206 return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));

--- 9 unchanged lines hidden (view full) ---

216 if (v_block_mapped((unsigned long)addr))
217 return;
218
219 if (addr > high_memory && (unsigned long) addr < ioremap_bot)
220 vunmap((void *) (PAGE_MASK & (unsigned long)addr));
221}
222EXPORT_SYMBOL(iounmap);
223
198 if (err) {
199 if (slab_is_available())
200 vunmap((void *)v);
201 return NULL;
202 }
203
204out:
205 return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));

--- 9 unchanged lines hidden (view full) ---

215 if (v_block_mapped((unsigned long)addr))
216 return;
217
218 if (addr > high_memory && (unsigned long) addr < ioremap_bot)
219 vunmap((void *) (PAGE_MASK & (unsigned long)addr));
220}
221EXPORT_SYMBOL(iounmap);
222
224int map_kernel_page(unsigned long va, phys_addr_t pa, int flags)
223int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
225{
226 pmd_t *pd;
227 pte_t *pg;
228 int err = -ENOMEM;
229
230 /* Use upper 10 bits of VA to index the first level map */
231 pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
232 /* Use middle 10 bits of VA to index the second-level map */
233 pg = pte_alloc_kernel(pd, va);
234 if (pg != 0) {
235 err = 0;
236 /* The PTE should never be already set nor present in the
237 * hash table
238 */
239 BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
224{
225 pmd_t *pd;
226 pte_t *pg;
227 int err = -ENOMEM;
228
229 /* Use upper 10 bits of VA to index the first level map */
230 pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
231 /* Use middle 10 bits of VA to index the second-level map */
232 pg = pte_alloc_kernel(pd, va);
233 if (pg != 0) {
234 err = 0;
235 /* The PTE should never be already set nor present in the
236 * hash table
237 */
238 BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
240 flags);
241 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
242 __pgprot(flags)));
239 pgprot_val(prot));
240 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
243 }
244 smp_wmb();
245 return err;
246}
247
248/*
249 * Map in a chunk of physical memory starting at start.
250 */
251static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
252{
241 }
242 smp_wmb();
243 return err;
244}
245
246/*
247 * Map in a chunk of physical memory starting at start.
248 */
249static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
250{
253 unsigned long v, s, f;
251 unsigned long v, s;
254 phys_addr_t p;
255 int ktext;
256
257 s = offset;
258 v = PAGE_OFFSET + s;
259 p = memstart_addr + s;
260 for (; s < top; s += PAGE_SIZE) {
261 ktext = ((char *)v >= _stext && (char *)v < etext) ||
262 ((char *)v >= _sinittext && (char *)v < _einittext);
252 phys_addr_t p;
253 int ktext;
254
255 s = offset;
256 v = PAGE_OFFSET + s;
257 p = memstart_addr + s;
258 for (; s < top; s += PAGE_SIZE) {
259 ktext = ((char *)v >= _stext && (char *)v < etext) ||
260 ((char *)v >= _sinittext && (char *)v < _einittext);
263 f = ktext ? pgprot_val(PAGE_KERNEL_TEXT) : pgprot_val(PAGE_KERNEL);
264 map_kernel_page(v, p, f);
261 map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
265#ifdef CONFIG_PPC_STD_MMU_32
266 if (ktext)
267 hash_preload(&init_mm, v, 0, 0x300);
268#endif
269 v += PAGE_SIZE;
270 p += PAGE_SIZE;
271 }
272}

--- 143 unchanged lines hidden ---
262#ifdef CONFIG_PPC_STD_MMU_32
263 if (ktext)
264 hash_preload(&init_mm, v, 0, 0x300);
265#endif
266 v += PAGE_SIZE;
267 p += PAGE_SIZE;
268 }
269}

--- 143 unchanged lines hidden ---