/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>
#include <asm/sizes.h>

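/*
 * Per-CPU TLB gather state for batched TLB teardown, and the kernel's
 * master page table (swapper_pg_dir), which backs all kernel-only
 * address translations.
 */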
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

#ifdef CONFIG_MMU
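/*
 * Walk the kernel page tables down to the pte for 'addr', allocating
 * the intermediate pud/pmd levels as needed. Returns NULL if the pgd
 * entry is absent or an intermediate allocation fails.
 */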
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	/*
	 * On allocation failure pud/pmd are NULL, so dumping the entry
	 * via pud_ERROR()/pmd_ERROR() would itself fault; just bail.
	 */
	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud))
		return NULL;

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd))
		return NULL;

	return pte_offset_kernel(pmd, addr);
}

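/*
 * Install a kernel mapping of 'phys' at 'addr' with protection 'prot',
 * flushing the stale TLB entry. _PAGE_WIRED mappings are additionally
 * pinned into the TLB so they cannot be evicted.
 */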
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte)
		return;
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

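/*
 * Tear down a mapping established by set_pte_phys(), unwiring the TLB
 * entry first if the mapping was wired.
 */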
static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte)
		return;

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

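/*
 * Fixmap slots live at compile-time-fixed virtual addresses; the
 * helpers below translate a slot index into its virtual address and
 * (un)map it. A typical caller (FIX_EXAMPLE is a hypothetical slot)
 * would look something like:
 *
 *	__set_fixmap(FIX_EXAMPLE, phys, PAGE_KERNEL);
 *	... access the mapping via fix_to_virt(FIX_EXAMPLE) ...
 *	__clear_fixmap(FIX_EXAMPLE, PAGE_KERNEL);
 */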
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

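/*
 * Pre-allocate the intermediate page tables covering the kernel range
 * [start, end) out of bootmem, so that later __set_fixmap() calls can
 * fill in pte entries without having to allocate memory.
 */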
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
#ifdef __PAGETABLE_PMD_FOLDED
			pmd = (pmd_t *)pud;
#else
			pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			pud_populate(&init_mm, pud, pmd);
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					pmd_populate_kernel(&init_mm, pmd, pte);
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

/*
 * paging_init() sets up the kernel page tables and the per-node zone sizes.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk(KERN_INFO "Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
	no_iommu_init();
}

unsigned int mem_init_done = 0;

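/*
 * Release all bootmem to the page allocator, set up the zero page,
 * and report the virtual memory layout. mem_init_done is set last so
 * that other code can tell whether the page allocator is available.
 */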
void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	iommu_init();

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

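/*
 * Return the pages backing the __init sections to the page allocator
 * once boot-time initialization code and data are no longer needed.
 */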
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n",
	       ((unsigned long)&__init_end -
	        (unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
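/* Likewise, return the (now unpacked) initrd image's pages. */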
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing initrd memory: %luk freed\n",
	       (end - start) >> 10);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
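/*
 * Memory hotplug: hot-added memory is handed straight to ZONE_NORMAL,
 * since paging_init() populates no other zone here.
 */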
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
				start_pfn, nr_pages);
	if (unlikely(ret))
		printk(KERN_ERR "%s: Failed, __add_pages() == %d\n",
		       __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */
366