/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

#ifdef CONFIG_SUPERH32
/*
 * Handle trivial transitions between cached and uncached
 * segments, making use of the 1:1 mapping relationship in
 * 512MB lowmem.
 *
 * This is the offset of the uncached section from its cached alias.
 * The default value is only valid in 29-bit mode; in 32-bit mode it
 * is overridden in pmb_init().
 */
unsigned long cached_to_uncached = P2SEG - P1SEG;
#endif
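
/*
 * Illustrative sketch (added, not part of the original file): with the
 * 1:1 aliasing above, the uncached (P2) view of a cached (P1) lowmem
 * address is plain pointer arithmetic. The helper below is
 * hypothetical; in-tree users apply the offset via helpers such as
 * jump_to_uncached()/back_to_cached().
 */
#if 0	/* example only, compiled out */
static inline void *uncached_alias(void *cached)
{
	return (void *)((unsigned long)cached + cached_to_uncached);
}
#endif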

#ifdef CONFIG_MMU
/*
 * Install a single kernel PTE, mapping 'addr' to physical address
 * 'phys' with protection 'prot', walking (and populating where
 * necessary) the pgd/pud/pmd hierarchy, then flushing the stale TLB
 * entry for that address.
 */
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		/* pud is NULL on allocation failure; don't dereference it */
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		/* likewise, pmd is NULL on allocation failure */
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap
 * mapping across a context switch. We don't presently do this, but it
 * could be done in a similar fashion to the wired TLB interface that
 * sh64 uses (by way of the memory mapped UTLB configuration). That
 * unfortunately forces us to give up a TLB entry for each mapping we
 * want to preserve. While this may be viable for a small number of
 * fixmaps, it's not particularly useful for everything, and needs to
 * be carefully evaluated. (ie, we may want this for the vsyscall
 * page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can
 * pass in at __set_fixmap() time to determine the appropriate behavior
 * to follow.
 *
 *					 -- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}
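
/*
 * Illustrative sketch (added): the usual fixmap pattern is to install
 * a mapping for a compile-time slot, then derive its virtual address
 * from the index. 'some_phys' is a hypothetical physical address; for
 * a real caller see the set_fixmap_nocache() use in paging_init()
 * below.
 */
#if 0	/* example only, compiled out */
	__set_fixmap(FIX_UNCACHED, some_phys, PAGE_KERNEL_NOCACHE);
	vaddr = fix_to_virt(FIX_UNCACHED);
#endif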

/*
 * Pre-allocate page tables so that the [start, end) virtual address
 * range is covered down to the pte level. The ptes themselves are
 * installed later (e.g. by __set_fixmap()).
 */
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					pmd_populate_kernel(&init_mm, pmd, pte);
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
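
/*
 * Example caller (added; this mirrors paging_init() below): cover the
 * fixmap region, pmd-aligned at both ends, out of swapper_pg_dir:
 *
 *	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 *	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
 *	page_table_range_init(vaddr, end, swapper_pg_dir);
 */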
#endif	/* CONFIG_MMU */

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);

	/* Set up the uncached fixmap */
	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
}

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
	no_iommu_init();
}

void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	iommu_init();

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* report sizes in KiB; one page is 1 << (PAGE_SHIFT - 10) KiB */
	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	/* Initialize the vDSO */
	vsyscall_init();
}

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		/* reset the refcount to 1 so free_page() really frees it */
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
	        (unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;
	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}
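
/*
 * Note (added): this slab-backed path exists because, when THREAD_SIZE
 * is smaller than a page, handing out whole pages for kernel stacks
 * would waste the remainder of each page. For example, assuming
 * PAGE_SHIFT == 12 and THREAD_SHIFT == 11, the cache packs two 2KiB
 * stacks into each 4KiB page.
 */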
#endif /* THREAD_SHIFT < PAGE_SHIFT */

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
				start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);
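
/*
 * Worked example (added, hypothetical values, assuming 4KiB pages):
 * hot-adding 64MiB of memory at physical address 0x10000000 on node 0
 * would be
 *
 *	arch_add_memory(0, 0x10000000, 0x04000000);
 *
 * which inserts pfns [0x10000, 0x14000) into ZONE_NORMAL.
 */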

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_PMB
/*
 * The SE bit in PASCR is set when the 32-bit address extension is
 * enabled; legacy 29-bit physical mode leaves it clear.
 */
int __in_29bit_mode(void)
{
	return !(ctrl_inl(PMB_PASCR) & PASCR_SE);
}
#endif /* CONFIG_PMB */