/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

/*
 * Not static inline because it is used by the IP27-specific
 * initialization code.
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	/* 8 pages span all cache colours on VCE-afflicted CPUs */
	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

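/*
 * Map 'page' into kernel space at a virtual address whose cache colour
 * matches the user-space address 'addr', by installing a temporary wired
 * TLB entry for one of the per-colour fixmap slots.  The caller must undo
 * the mapping with kunmap_coherent().
 */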
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	preempt_disable();
	pagefault_disable();
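	/*
	 * Pick the fixmap slot whose colour matches addr; interrupt context
	 * gets its own set of slots so it cannot clobber a mapping set up
	 * by the interrupted task.
	 */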
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, prot);
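	/*
	 * Build the EntryLo value; with XPA, or with 64-bit physical
	 * addresses on a 32-bit CPU, the upper PFN bits live in pte_high
	 * rather than in the primary pte value.
	 */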
#if defined(CONFIG_XPA)
	entrylo = pte_to_entrylo(pte.pte_high);
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_XPA
	if (cpu_has_xpa) {
		entrylo = (pte.pte_low & _PFNX_MASK);
		writex_c0_entrylo0(entrylo);
		writex_c0_entrylo1(entrylo);
	}
#endif
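	/* Install the mapping in a freshly claimed wired TLB entry */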
	tlbidx = num_wired_entries();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);

	return (void *)vaddr;
}

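/*
 * kmap_coherent() maps the page with the kernel's default cache attributes
 * at a colour-matched address; kmap_noncoherent() does the same but with
 * the cacheable non-coherent attribute (PAGE_KERNEL_NC).
 */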
void *kmap_coherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

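/*
 * Tear down the wired TLB entry installed by the most recent
 * kmap_coherent()/kmap_noncoherent() call and re-enable preemption.
 */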
void kunmap_coherent(void)
{
	unsigned int wired;
	unsigned long flags, old_ctx;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	wired = num_wired_entries() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
	pagefault_enable();
	preempt_enable();
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
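	/*
	 * If the source page is mapped in user space and its kernel-side
	 * view is clean, read it through a colour-matched mapping so we
	 * see the user's data rather than a stale cache line.
	 */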
	if (cpu_has_dc_aliases &&
	    page_mapcount(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

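/*
 * Write into a user page on behalf of ptrace and friends: go through a
 * colour-matched mapping when dcache aliasing could bite, otherwise write
 * via the given kernel address and remember that the dcache is now dirty.
 */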
void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

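/*
 * The read-side counterpart of copy_to_user_page(), with the same
 * aliasing considerations when choosing where to read from.
 */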
void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

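/*
 * Pre-allocate the pte tables covering the fixmap virtual range so that
 * later fixmap and kmap_atomic() users can install ptes directly; the
 * body is compiled only when CONFIG_HIGHMEM needs kmap slots there.
 */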
void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

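/*
 * Default MAAR setup: permit speculative accesses to every RAM region in
 * the boot memory map.  MAAR address bounds have 64 KiB granularity, so
 * region bounds are rounded inwards to 64 KiB boundaries.  Platforms may
 * override this weak implementation.
 */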
unsigned __weak platform_maar_init(unsigned num_pairs)
{
	struct maar_config cfg[BOOT_MEM_MAP_MAX];
	unsigned i, num_configured, num_cfg = 0;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
			break;
		default:
			continue;
		}

		/* Round lower up */
		cfg[num_cfg].lower = boot_mem_map.map[i].addr;
		cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;

		/* Round upper down */
		cfg[num_cfg].upper = boot_mem_map.map[i].addr +
					boot_mem_map.map[i].size;
		cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;

		cfg[num_cfg].attrs = MIPS_MAAR_S;
		num_cfg++;
	}

	num_configured = maar_config(cfg, num_cfg, num_pairs);
	if (num_configured < num_cfg)
		pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
			num_pairs, num_cfg);

	return num_configured;
}

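/*
 * Probe and program the Memory Accessibility Attribute Registers.  The
 * boot CPU records the configuration it programs so that secondary CPUs
 * can simply replay it when they come online.
 */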
void maar_init(void)
{
	unsigned num_maars, used, i;
	phys_addr_t lower, upper, attr;
	static struct {
		struct maar_config cfgs[3];
		unsigned used;
	} recorded = { { { 0 } }, 0 };

	if (!cpu_has_maar)
		return;

	/* Detect the number of MAARs */
	write_c0_maari(~0);
	back_to_back_c0_hazard();
	num_maars = read_c0_maari() + 1;

	/* MAARs should be in pairs */
	WARN_ON(num_maars % 2);

	/* Set MAARs using values we recorded already */
	if (recorded.used) {
		used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
		BUG_ON(used != recorded.used);
	} else {
		/* Configure the required MAARs */
		used = platform_maar_init(num_maars / 2);
	}

	/* Disable any further MAARs */
	for (i = (used * 2); i < num_maars; i++) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		write_c0_maar(0);
		back_to_back_c0_hazard();
	}

	if (recorded.used)
		return;

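	/* Dump the configuration and record it for secondary CPUs */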
	pr_info("MAAR configuration:\n");
	for (i = 0; i < num_maars; i += 2) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		upper = read_c0_maar();

		write_c0_maari(i + 1);
		back_to_back_c0_hazard();
		lower = read_c0_maar();

		attr = lower & upper;
		lower = (lower & MIPS_MAAR_ADDR) << 4;
		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

		pr_info("  [%d]: ", i / 2);
		if (!(attr & MIPS_MAAR_VL)) {
			pr_cont("disabled\n");
			continue;
		}

		pr_cont("%pa-%pa", &lower, &upper);

		if (attr & MIPS_MAAR_S)
			pr_cont(" speculate");

		pr_cont("\n");

		/* Record the setup for use on secondary CPUs */
		if (used <= ARRAY_SIZE(recorded.cfgs)) {
			recorded.cfgs[recorded.used].lower = lower;
			recorded.cfgs[recorded.used].upper = upper;
			recorded.cfgs[recorded.used].attrs = attr;
			recorded.used++;
		}
	}
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
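/*
 * Return non-zero iff the pfn lies within a usable RAM region of the
 * boot memory map.
 */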
int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
			break;
		default:
			/* not usable memory */
			continue;
		}

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

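/*
 * Set up the kernel page tables and hand the zone limits to the core mm.
 * Highmem is discarded on CPUs with dcache aliases, which cannot map it
 * safely.
 */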
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn __maybe_unused;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

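/*
 * Give highmem pages to the buddy allocator, unless dcache aliasing makes
 * highmem unusable, in which case paging_init() has already discarded it.
 */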
static inline void mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	if (cpu_has_dc_aliases)
		return;

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp))
			SetPageReserved(page);
		else
			free_highmem_page(page);
	}
#endif
}

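/*
 * Late memory init: release bootmem to the buddy allocator, set up the
 * zero pages and report the final memory layout.
 */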
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	maar_init();
	free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages.  */
	mem_init_free_highmem();
	mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

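/*
 * Poison the pages in [begin, end) with POISON_FREE_INITMEM so stale
 * users are caught, then return them to the buddy allocator.
 */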
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif

void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __ref free_initmem(void)
{
	prom_free_prom_memory();
	/*
	 * Let the platform define a specific function to free the
	 * init section since EVA may have used any possible mapping
	 * between virtual and physical addresses.
	 */
	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offsets.h until that gcc
 * is officially retired.
 *
 * Aligning swapper_pg_dir to 64K allows its address to be loaded with a
 * single LUI instruction in the TLB handlers.  If we used __aligned(64K),
 * its size would get rounded up to the alignment size and waste space.  So
 * we place it in its own section and align it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);