xref: /linux/arch/mips/mm/init.c (revision 17afab1de42236ee2f6235f4383cc6f3f13f8a10)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags); \
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
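
/*
 * Usage sketch (illustrative only): the two macros must be used as a
 * balanced pair in the same scope, because the SMTC variant opens a
 * block and declares the mvpflags local that EXIT_CRITICAL consumes:
 *
 *	unsigned long flags;
 *
 *	ENTER_CRITICAL(flags);
 *	... poke TLB / CP0 registers ...
 *	EXIT_CRITICAL(flags);
 */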

/*
 * We have up to 8 empty zeroed pages so we can map one of the right
 * colour when needed.  This is necessary only on R4000 / R4400 SC and
 * MC versions where we have to avoid VCED / VECI exceptions for good
 * performance at any price.  Since the pages are never written to
 * after initialization we don't have to care about aliases on other
 * CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
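
/*
 * A sketch of how zero_page_mask is consumed: ZERO_PAGE() in
 * <asm/pgtable.h> offsets into the block allocated above so that the
 * zero page handed out has the same cache colour as the faulting
 * virtual address, roughly:
 *
 *	virt_to_page((void *)(empty_zero_page +
 *			      ((unsigned long)(vaddr) & zero_page_mask)))
 */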

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

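/*
 * Map @page at a fixmap virtual address whose cache colour matches the
 * user mapping at @addr, so that accesses through the returned pointer
 * hit the same dcache lines as the user's view of the page.  The
 * mapping is installed straight into the TLB (as a wired entry, or a
 * probed/random one on SMTC) and must be undone with kunmap_coherent().
 */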
void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	inc_preempt_count();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
	idx += FIX_N_COLOURS * smp_processor_id() +
		(in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
#else
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
#endif
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
	/* preload TLB instead of local_flush_tlb_one() */
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	tlbidx = read_c0_index();
	mtc0_tlbw_hazard();
	if (tlbidx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
#else
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
#endif
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);

	return (void *)vaddr;
}

#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
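/*
 * CKSEG0 is an unmapped segment, so an EntryHi built from it can never
 * be matched by a mapped access; shifting by PAGE_SHIFT + 1 makes the
 * value unique per TLB index, giving a harmless dummy entry when a
 * wired slot is retired below.
 */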

void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
	unsigned int wired;
	unsigned long flags, old_ctx;

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
#endif
	dec_preempt_count();
	preempt_check_resched();
}

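/*
 * Copy a user highmem page.  If the source page is mapped in userspace
 * and its dcache is clean, read it through a coherent (same-colour)
 * kernel mapping so we see exactly what userspace sees; otherwise a
 * plain kmap_atomic() copy is safe.
 */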
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    page_mapped(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

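/*
 * Write into a page on behalf of another process (e.g. ptrace via
 * access_process_vm).  Going through kmap_coherent() when the page has
 * a clean, live user mapping keeps the user's dcache view coherent;
 * otherwise we write through @dst and flag the page dirty so the cache
 * is dealt with before userspace can see stale data.
 */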
void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

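/*
 * Read from another process's page; the mirror image of
 * copy_to_user_page() above, with the data flowing from the page into
 * @dst.
 */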
void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}

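/*
 * Pre-allocate the intermediate page tables covering [start, end) in
 * the fixmap / kmap window, so that later set_pte() calls for those
 * addresses never need to allocate.  Only HIGHMEM and SMTC configs
 * actually use that window.
 */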
void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
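/*
 * Return 1 if @pagenr lies inside one of the usable (RAM or init-RAM)
 * regions the firmware reported in boot_mem_map.
 */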
int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
			break;
		default:
			/* not usable memory */
			continue;
		}

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

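/*
 * Build the kernel page tables and hand the zone sizes to the core VM.
 * Note the highmem quirk below: on dcache-aliasing CPUs highmem pages
 * cannot be handled safely, so any memory above max_low_pfn is simply
 * ignored.
 */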
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn __maybe_unused;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

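/*
 * Release all bootmem to the buddy allocator, set up the zero pages,
 * account RAM versus reserved pages, and print the usual
 * "Memory: ..." boot banner.
 */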
void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	totalram_pages += free_all_bootmem();
	setup_zero_pages();	/* Set up zeroed pages.  */

	reservedpages = ram = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		if (page_is_ram(tmp) && pfn_valid(tmp)) {
			ram++;
			if (PageReserved(pfn_to_page(tmp)))
				reservedpages++;
		}
	num_physpages = ram;

#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp)) {
			SetPageReserved(page);
			continue;
		}
		free_highmem_page(page);
	}
	num_physpages += totalhigh_pages;
#endif

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
	       "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       ram << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       totalhigh_pages << (PAGE_SHIFT-10));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

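/*
 * Poison and hand back to the page allocator every whole page in
 * [begin, end); PFN_UP/PFN_DOWN ensure partial pages at either end are
 * left alone.
 */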
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
}
#endif

void __init_refok free_initmem(void)
{
	prom_free_prom_memory();
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and
 * PGD_ORDER are constants, so we use the variants from asm-offsets.h
 * until that gcc is officially retired.
 *
 * swapper_pg_dir is aligned to 64K so that its address can be loaded
 * with a single LUI instruction in the TLB handlers.  If we used
 * __aligned(64K), its size would get rounded up to the alignment size
 * and waste space, so instead we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
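
/*
 * invalid_pmd_table / invalid_pte_table above are shared all-zero
 * levels that every empty pgd (and pmd) slot points at, so a page
 * table walk from the TLB refill path always reaches a pte and takes a
 * clean fault on the invalid entry instead of chasing a null pointer.
 */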