/*
 * arch/xtensa/mm/init.c
 *
 * Derived from MIPS, PPC.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 * Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
 *
 * Chris Zankel	<chris@zankel.net>
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 * Marc Gauthier
 * Kevin Chea
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/of_fdt.h>
#include <linux/dma-map-ops.h>

#include <asm/bootparam.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/sysmem.h>

/*
 * Initialize boot-time memory management (memblock): reserve memory that
 * must not be handed out and establish the lowmem/highmem page frame limits.
 */

void __init bootmem_init(void)
{
	/* Reserve all memory below PHYS_OFFSET, as memory
	 * accounting doesn't work for pages below that address.
	 *
	 * If PHYS_OFFSET is zero, reserve the page at address 0:
	 * successful allocations should never return NULL.
	 */
	memblock_reserve(0, PHYS_OFFSET ? PHYS_OFFSET : 1);

	early_init_fdt_scan_reserved_mem();

	if (!memblock_phys_mem_size())
		panic("No memory found!\n");

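	/*
	 * Establish the page frame bounds: min_low_pfn is the first usable
	 * frame at or above PHYS_OFFSET, max_pfn marks the end of RAM, and
	 * max_low_pfn caps the directly mapped lowmem range at MAX_LOW_PFN;
	 * frames above that limit are only usable as highmem.
	 */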
	min_low_pfn = PFN_UP(memblock_start_of_DRAM());
	min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	max_low_pfn = min(max_pfn, MAX_LOW_PFN);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

	memblock_dump_all();
}


void __init zones_init(void)
{
	/* All memory is DMA-capable, so there is no separate ZONE_DMA:
	 * lowmem goes into ZONE_NORMAL and, with CONFIG_HIGHMEM, pages
	 * above max_low_pfn into ZONE_HIGHMEM.
	 */
	unsigned long max_zone_pfn[MAX_NR_ZONES] = {
		[ZONE_NORMAL] = max_low_pfn,
#ifdef CONFIG_HIGHMEM
		[ZONE_HIGHMEM] = max_pfn,
#endif
	};
	free_area_init(max_zone_pfn);
}

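/*
 * Hand memblock-free page frames above the lowmem boundary (max_low_pfn)
 * back to the page allocator as highmem pages.
 */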
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	phys_addr_t range_start, range_end;
	u64 i;

	/* Set highmem pages free */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&range_start, &range_end, NULL) {
		unsigned long start = PFN_UP(range_start);
		unsigned long end = PFN_DOWN(range_end);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		for (; start < end; start++)
			free_highmem_page(pfn_to_page(start));
	}
#endif
}

/*
 * Initialize memory: release free highmem and memblock memory to the page
 * allocator and report the resulting virtual memory layout.
 */

void __init mem_init(void)
{
	free_highpages();

	max_mapnr = max_pfn - ARCH_PFN_OFFSET;
	high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);

	memblock_free_all();

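	/*
	 * Print the kernel's virtual memory layout. The format string and
	 * the argument list below are assembled from matching CONFIG_KASAN,
	 * CONFIG_MMU and CONFIG_HIGHMEM blocks and must be kept in sync.
	 */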
	pr_info("virtual kernel memory layout:\n"
#ifdef CONFIG_KASAN
		"    kasan   : 0x%08lx - 0x%08lx  (%5lu MB)\n"
#endif
#ifdef CONFIG_MMU
		"    vmalloc : 0x%08lx - 0x%08lx  (%5lu MB)\n"
#endif
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
		"    fixmap  : 0x%08lx - 0x%08lx  (%5lu kB)\n"
#endif
		"    lowmem  : 0x%08lx - 0x%08lx  (%5lu MB)\n"
		"    .text   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
		"    .rodata : 0x%08lx - 0x%08lx  (%5lu kB)\n"
		"    .data   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
		"    .init   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
		"    .bss    : 0x%08lx - 0x%08lx  (%5lu kB)\n",
#ifdef CONFIG_KASAN
		KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE,
		KASAN_SHADOW_SIZE >> 20,
#endif
#ifdef CONFIG_MMU
		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,
#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
		FIXADDR_START, FIXADDR_END,
		(FIXADDR_END - FIXADDR_START) >> 10,
#endif
		PAGE_OFFSET, PAGE_OFFSET +
		(max_low_pfn - min_low_pfn) * PAGE_SIZE,
#else
		min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE,
#endif
		((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20,
		(unsigned long)_text, (unsigned long)_etext,
		(unsigned long)(_etext - _text) >> 10,
		(unsigned long)__start_rodata, (unsigned long)__end_rodata,
		(unsigned long)(__end_rodata - __start_rodata) >> 10,
		(unsigned long)_sdata, (unsigned long)_edata,
		(unsigned long)(_edata - _sdata) >> 10,
		(unsigned long)__init_begin, (unsigned long)__init_end,
		(unsigned long)(__init_end - __init_begin) >> 10,
		(unsigned long)__bss_start, (unsigned long)__bss_stop,
		(unsigned long)(__bss_stop - __bss_start) >> 10);
}

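/*
 * Parse one "memmap=" entry. Sizes and addresses use memparse() syntax
 * (e.g. "memmap=64M@0x10000000"). Supported forms:
 *   <size>@<addr>  add a memory region of <size> bytes at <addr>
 *   <size>$<addr>  reserve a region of <size> bytes at <addr>
 *   <size>         limit usable memory by reserving everything above <size>
 */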
static void __init parse_memmap_one(char *p)
{
	char *oldp;
	unsigned long start_at, mem_size;

	if (!p)
		return;

	oldp = p;
	mem_size = memparse(p, &p);
	if (p == oldp)
		return;

	switch (*p) {
	case '@':
		start_at = memparse(p + 1, &p);
		memblock_add(start_at, mem_size);
		break;

	case '$':
		start_at = memparse(p + 1, &p);
		memblock_reserve(start_at, mem_size);
		break;

	case 0:
		memblock_reserve(mem_size, -mem_size);
		break;

	default:
		pr_warn("Unrecognized memmap syntax: %s\n", p);
		break;
	}
}

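/* Split a comma-separated "memmap=" option into individual entries. */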
static int __init parse_memmap_opt(char *str)
{
	while (str) {
		char *k = strchr(str, ',');

		if (k)
			*k++ = 0;

		parse_memmap_one(str);
		str = k;
	}

	return 0;
}
early_param("memmap", parse_memmap_opt);

#ifdef CONFIG_MMU
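/*
 * Map VM_READ/VM_WRITE/VM_EXEC/VM_SHARED combinations to xtensa page
 * protections: private writable mappings get copy-on-write (PAGE_COPY*),
 * shared writable mappings get PAGE_SHARED*. DECLARE_VM_GET_PAGE_PROT
 * emits the vm_get_page_prot() helper that indexes this table.
 */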
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY,
	[VM_EXEC]					= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY_EXEC,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_EXEC,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_EXEC
};
DECLARE_VM_GET_PAGE_PROT
#endif