xref: /linux/arch/sh/mm/init.c (revision 60e13231561b3a4c5269bfa1ef6c0569ad6f28ec)
/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/sizes.h>
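
/*
 * The kernel's reference page table. init_mm points at it, and the
 * fixmap entries set up in paging_init() below are installed in it.
 */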
pgd_t swapper_pg_dir[PTRS_PER_PGD];
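
/*
 * Default memory setup: register the platform's RAM window with
 * memblock. Boards with more exotic layouts can provide their own
 * mv_mem_init in their machine vector, which paging_init() invokes
 * in place of this.
 */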
void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	/*
	 * pud/pmd are NULL on allocation failure and must not be
	 * dereferenced for diagnostics.
	 */
	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud))
		return NULL;

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd))
		return NULL;

	return pte_offset_kernel(pmd, addr);
}

static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (unlikely(!pte))
		return;
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (unlikely(!pte))
		return;

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}
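
/*
 * A hypothetical usage sketch (the slot and pgprot here are chosen
 * purely for illustration): wire a physical page into a fixmap slot,
 * access it through its fixed virtual address, then tear it down:
 *
 *	__set_fixmap(FIX_IOREMAP_BEGIN, phys, PAGE_KERNEL_NOCACHE);
 *	ptr = (void __iomem *)fix_to_virt(FIX_IOREMAP_BEGIN);
 *	...
 *	__clear_fixmap(FIX_IOREMAP_BEGIN, PAGE_KERNEL_NOCACHE);
 */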

static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = alloc_bootmem_pages(PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = alloc_bootmem_pages(PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}
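
/*
 * Pre-allocate the pmd and pte tables covering [start, end) so that
 * fixmap entries can later be installed without any allocation. Only
 * the paging structure is built here; the leaf ptes are filled in
 * by __set_fixmap().
 */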
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */
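
/*
 * Set up the pglist_data for one node. In the flat (non-NUMA) case
 * NODE_DATA(0) is the statically allocated contig_page_data, so only
 * the pfn span needs filling in; NUMA configurations carve the pgdat
 * itself out of memblock, preferring memory local to the node.
 */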
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	phys = __memblock_alloc_base(sizeof(struct pglist_data),
				SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
		phys = __memblock_alloc_base(sizeof(struct pglist_data),
					SMP_CACHE_BYTES, memblock_end_of_DRAM());
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

static void __init bootmem_init_one_node(unsigned int nid)
{
	unsigned long total_pages, paddr;
	unsigned long end_pfn;
	struct pglist_data *p;

	p = NODE_DATA(nid);

	/* Nothing to do.. */
	if (!p->node_spanned_pages)
		return;

	end_pfn = p->node_start_pfn + p->node_spanned_pages;

	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);

	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
	if (!paddr)
		panic("Can't allocate bootmap for nid[%d]\n", nid);

	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/*
	 * XXX Handle initial reservations for the system memory node
	 * only for the moment, we'll refactor this later for handling
	 * reservations in other nodes.
	 */
	if (nid == 0) {
		struct memblock_region *reg;

		/* Reserve the sections we're already using. */
		for_each_memblock(reserved, reg) {
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		}
	}

	sparse_memory_present_with_active_regions(nid);
}
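
/*
 * First-pass bootmem bring-up: describe every memblock region to the
 * active-region code, set up node 0 (platforms may register further
 * nodes from plat_mem_setup()), and hand each online node to
 * bootmem_init_one_node() above.
 */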
static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;
	int i;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_online_node(i)
		bootmem_init_one_node(i);

	sparse_init();
}

static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));
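	/*
	 * e.g. with 4 KiB pages, an _end at physical 0x0c123456 gives
	 * PFN_UP() == 0x0c124, the first page wholly above the kernel.
	 */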

	/*
	 * Reserve the kernel text and the bootmem bitmap. We do this
	 * in two steps (the first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
		    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
		    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;
	int nid;

	memblock_init();
	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_analyze();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/*
	 * We don't need to map the kernel through the TLB, as it is
	 * permanently mapped via the cached P1 identity segment. So
	 * clear the entire pgd.
	 */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/*
	 * Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value.
	 */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
	no_iommu_init();
}
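
/*
 * Set once mem_init() has completed and the page allocator is usable;
 * early code (the ioremap path, for example) checks this to decide
 * whether it can take the normal allocation routes yet.
 */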
unsigned int mem_init_done = 0;

void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	iommu_init();

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
	        (unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;
	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
				start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */