/*
 *  This file contains the routines setting up the linux page tables.
 *
 * Copyright (C) 2008 Michal Simek
 * Copyright (C) 2008 PetaLogix
 *
 *    Copyright (C) 2007 Xilinx, Inc.  All rights reserved.
 *
 *  Derived from arch/ppc/mm/pgtable.c:
 *    -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This file is subject to the terms and conditions of the GNU General
 *  Public License.  See the file COPYING in the main directory of this
 *  archive for more details.
 *
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>
#include <linux/memblock.h>
#include <linux/kallsyms.h>

#include <asm/pgalloc.h>
#include <linux/io.h>
#include <asm/mmu.h>
#include <asm/sections.h>
#include <asm/fixmap.h>

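/*
 * Bounds of the early I/O remap area: before the vmalloc allocator is
 * up, __ioremap() below hands out virtual space downward from
 * ioremap_base, with ioremap_bot tracking how far down it has reached.
 */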
unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);

static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 *
	 * However, allow remap of rootfs: TBD.  Note that the __bss_stop
	 * exemption below is vacuous as written: both bounds of the window
	 * are __bss_stop, so the clause never excludes any address.
	 */
	if (mem_init_done &&
		p >= memory_start && p < virt_to_phys(high_memory) &&
		!(p >= __virt_to_phys((phys_addr_t)__bss_stop) &&
		p < __virt_to_phys((phys_addr_t)__bss_stop))) {
		pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %ps\n",
			(unsigned long)p, __builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? If the whole area is mapped then we're
	 * done, otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v + i, p + i, flags);
	if (err) {
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}

void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);

void iounmap(volatile void __iomem *addr)
{
	if ((__force void *)addr > high_memory &&
	    (unsigned long)addr < ioremap_bot)
		vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(iounmap);
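
/*
 * Example usage of ioremap()/iounmap() (an illustrative sketch only;
 * the base address and register offset below are hypothetical, not
 * taken from this file):
 *
 *	void __iomem *regs = ioremap(0xa0000000, 0x1000);
 *	u32 status;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + 0x04);	(an uncached, guarded access)
 *	iounmap(regs);
 */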

int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	p4d = p4d_offset(pgd_offset_k(va), va);
	pud = pud_offset(p4d, va);
	pd = pmd_offset(pud, va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */

	if (pg != NULL) {
		err = 0;
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
				__pgprot(flags)));
		if (unlikely(mem_init_done))
			_tlbie(va);
	}
	return err;
}
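
/*
 * Illustrative sketch of a map_page() call (va and pa are hypothetical,
 * page-aligned values), wiring one uncached, guarded kernel page:
 *
 *	err = map_page(va, pa, _PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED);
 *	if (err)
 *		return err;
 */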

/*
 * Map in all of physical memory starting at CONFIG_KERNEL_START.
 */
void __init mapin_ram(void)
{
	unsigned long v, p, s, f;

	v = CONFIG_KERNEL_START;
	p = memory_start;
	for (s = 0; s < lowmem_size; s += PAGE_SIZE) {
		f = _PAGE_PRESENT | _PAGE_ACCESSED |
				_PAGE_SHARED | _PAGE_HWEXEC;
		if (!is_kernel_text(v))
			f |= _PAGE_WRENABLE;
		else
			/*
			 * On MicroBlaze a page with no user access is
			 * implicitly read/write for the kernel, so kernel
			 * text is marked _PAGE_USER to keep it
			 * write-protected.
			 */
			f |= _PAGE_USER;
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}
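
/*
 * The loop above establishes the kernel's linear mapping: for every page
 * of lowmem, virtual CONFIG_KERNEL_START + off translates to physical
 * memory_start + off.
 */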

/* is x a power of 2? */
#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))
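
/*
 * The test relies on x & (x - 1) clearing the lowest set bit, so the
 * result is zero only when at most one bit is set: 8 & 7 == 0, while
 * 6 & 5 == 4 != 0.  The same helper exists as is_power_of_2() in
 * <linux/log2.h>.
 */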

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if a PTE was found, zero otherwise.  If no PTE
 * is found, *ptep is left unmodified.
 */
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
	pgd_t	*pgd;
	p4d_t	*p4d;
	pud_t	*pud;
	pmd_t	*pmd;
	pte_t	*pte;
	int	retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		p4d = p4d_offset(pgd, addr & PAGE_MASK);
		pud = pud_offset(p4d, addr & PAGE_MASK);
		pmd = pmd_offset(pud, addr & PAGE_MASK);
		if (pmd_present(*pmd)) {
			pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
			if (pte) {
				retval = 1;
				*ptep = pte;
			}
		}
	}
	return retval;
}

/* Find physical address for this virtual address.  Normally used by
 * I/O functions, but anyone can call it.
 */
unsigned long iopa(unsigned long addr)
{
	unsigned long pa;
	pte_t *pte;
	struct mm_struct *mm;

	/* Allow mapping of user addresses (within the thread)
	 * for DMA if necessary.
	 */
	if (addr < TASK_SIZE)
		mm = current->mm;
	else
		mm = &init_mm;

	pa = 0;
	if (get_pteptr(mm, addr, &pte))
		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);

	return pa;
}
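
/*
 * Illustrative sketch of iopa() (the addresses are hypothetical): after
 *
 *	void __iomem *v = ioremap(0xa0000000, PAGE_SIZE);
 *
 * the bus address can be recovered from the mapping with
 *
 *	unsigned long pa = iopa((unsigned long)v);	(pa == 0xa0000000)
 */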

__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	if (mem_init_done)
		return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	else
		return memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					      MEMBLOCK_LOW_LIMIT,
					      memory_start + kernel_tlb,
					      NUMA_NO_NODE);
}
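
/*
 * Note on the memblock path above: the allocation is capped at
 * memory_start + kernel_tlb, presumably so that the new page-table page
 * stays inside the RAM already covered by the kernel's fixed TLB
 * entries and is therefore addressable this early in boot.
 */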

void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	BUG_ON(idx >= __end_of_fixed_addresses);

	map_page(address, phys, pgprot_val(flags));
}
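
/*
 * Illustrative sketch: the generic wrappers in <asm-generic/fixmap.h>
 * funnel into __set_fixmap(), e.g. (assuming a FIX_EARLYCON_MEM_BASE
 * slot is defined for this platform):
 *
 *	set_fixmap(FIX_EARLYCON_MEM_BASE, uart_phys);
 *
 * which maps uart_phys at a fixed virtual slot before the page
 * allocator is up.
 */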
265