// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include <mm/mmu_decl.h>

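/*
 * Note (editorial, not from the original source): memstart_addr is the
 * physical address of the start of system RAM, while kernstart_addr is
 * the physical address the kernel itself starts at; the two differ when
 * the kernel is relocated.
 */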
phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
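
/*
 * Worked example (illustrative only, assuming 64K base pages,
 * SECTION_SIZE_BITS == 24 and sizeof(struct page) == 64):
 * PAGES_PER_SECTION is 1 << (24 - 16) = 256, so for an address
 * 0x5000 bytes into the vmemmap the result is
 * (0x5000 / 64) & ~255 = 320 & ~255 = 256, i.e. the first pfn of
 * the second 16MB section.
 */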

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;
	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(page_to_pfn((struct page *)start)))
			return 1;

	return 0;
}
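
/*
 * With the example geometry above, one section's worth of struct pages
 * occupies 256 * 64 = 16K of vmemmap, so a 16MB vmemmap page (a typical
 * hash-MMU mmu_vmemmap_psize) overlaps 1024 sections and the loop above
 * probes each of them in turn.
 */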

/*
 * vmemmap virtual address space management does not have a traditional page
 * table to track which virtual struct pages are backed by a physical mapping.
 * The virtual to physical mappings are tracked in a simple linked list
 * format. 'vmemmap_list' maintains the entire vmemmap physical mapping at
 * all times, whereas the 'next' list maintains the available
 * vmemmap_backing structures which have been deleted from 'vmemmap_list'
 * during system runtime (memory hotplug remove operation). The freed
 * 'vmemmap_backing' structures are reused later when new requests come in
 * without allocating fresh memory. This pointer also tracks the allocated
 * 'vmemmap_backing' structures as we allocate one full page of memory at a
 * time when we don't have any.
 */
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;

/*
 * The same pointer 'next' tracks individual chunks inside the allocated
 * full page during boot, and then tracks the freed nodes during runtime.
 * This is racy only in theory, as the two uses are separated by the boot
 * process; it would become a problem if a memory hotplug operation
 * somehow happened during boot.
 */
static int num_left;
static int num_freed;

static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;

	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}
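
/*
 * The allocator above combines a bump allocator (carving chunks out of a
 * whole page) with a LIFO free list.  A minimal user-space model of the
 * same pattern, as an illustrative sketch only (the names below are
 * hypothetical, not kernel API):
 *
 *	#include <stdlib.h>
 *
 *	struct node { struct node *list; };
 *	static struct node *freelist, *chunk;
 *	static int left, freed;
 *
 *	static struct node *node_alloc(void)
 *	{
 *		if (freed) {		// reuse a freed entry (LIFO)
 *			struct node *n = freelist;
 *			freed--;
 *			freelist = freelist->list;
 *			return n;
 *		}
 *		if (!left) {		// carve a fresh block into chunks
 *			chunk = malloc(4096);
 *			if (!chunk)
 *				return NULL;
 *			left = 4096 / sizeof(struct node);
 *		}
 *		left--;
 *		return chunk++;
 *	}
 *
 * Entries are never handed back to the underlying allocator; freeing
 * (see vmemmap_list_free() below) just pushes them onto the free list.
 */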

static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}
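
/*
 * Note that entries are inserted at the head, so vmemmap_list runs from
 * the most recently populated region to the oldest; vmemmap_list_free()
 * below walks it linearly from that head.
 */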

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p = NULL;
		int rc;

		if (vmemmap_populated(start, page_size))
			continue;

		/*
		 * Allocate from the altmap first if we have one. This may
		 * fail due to alignment issues when using 16MB hugepages, so
		 * fall back to system memory if the altmap allocation fails.
		 */
		if (altmap) {
			p = altmap_alloc_block_buf(page_size, altmap);
			if (!p)
				pr_debug("altmap block allocation failed, falling back to system memory");
		}
		if (!p)
			p = vmemmap_alloc_block_buf(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warn("%s: Unable to create vmemmap mapping: %d\n",
				__func__, rc);
			return -EFAULT;
		}
	}

	return 0;
}
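
/*
 * Back-of-the-envelope sizing (illustrative, assuming 64K base pages, a
 * 16MB vmemmap page size and sizeof(struct page) == 64): onlining 16GB
 * of RAM creates 16GB / 64K = 262144 struct pages, i.e. 16MB of vmemmap,
 * which the loop above maps with a single 16MB allocation.
 */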

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it, recording the predecessor as we go */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return 0;
	}

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* make 'next' point to this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

void __ref vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long page_order = get_order(page_size);
	unsigned long alt_start = ~0, alt_end = ~0;
	unsigned long base_pfn;

	start = _ALIGN_DOWN(start, page_size);
	if (altmap) {
		alt_start = altmap->base_pfn;
		alt_end = altmap->base_pfn + altmap->reserve +
			  altmap->free + altmap->alloc + altmap->align;
	}

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long nr_pages, addr;
		struct page *page;

		/*
		 * The section has already been marked as invalid, so if
		 * vmemmap_populated() returns true some other section still
		 * maps into this page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (!addr)
			continue;

		page = pfn_to_page(addr >> PAGE_SHIFT);
		nr_pages = 1 << page_order;
		base_pfn = PHYS_PFN(addr);

		if (base_pfn >= alt_start && base_pfn < alt_end) {
			vmem_altmap_free(altmap, nr_pages);
		} else if (PageReserved(page)) {
			/* allocated from bootmem */
			if (page_size < PAGE_SIZE) {
				/*
				 * this shouldn't happen, but if it is
				 * the case, leave the memory there
				 */
				WARN_ON_ONCE(1);
			} else {
				while (nr_pages--)
					free_reserved_page(page++);
			}
		} else {
			free_pages((unsigned long)(__va(addr)), page_order);
		}

		vmemmap_remove_mapping(start, page_size);
	}
}
#endif

void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}

#endif /* CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_PPC_BOOK3S_64
static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);

static int __init parse_disable_radix(char *p)
{
	bool val;

	if (!p)
		val = true;
	else if (kstrtobool(p, &val))
		return -EINVAL;

	disable_radix = val;

	return 0;
}
early_param("disable_radix", parse_disable_radix);
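
/*
 * Example command lines (kstrtobool() accepts 1/0, y/n and on/off):
 *
 *	disable_radix		-> bare flag, treated as true: radix disabled
 *	disable_radix=on	-> radix disabled
 *	disable_radix=no	-> radix left enabled, overriding a Kconfig
 *				   default of CONFIG_PPC_RADIX_MMU_DEFAULT=n
 */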

/*
 * If we're running under a hypervisor, we need to check the contents of
 * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do
 * radix.  If not, we clear the radix feature bit so we fall back to hash.
 */
static void __init early_check_vec5(void)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;
	u8 mmu_supported;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}

	/* Check for supported configuration */
	mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
			OV5_FEAT(OV5_MMU_SUPPORT);
	if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
		/* Hypervisor only supports radix - check enabled && GTSE */
		if (!early_radix_enabled()) {
			pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
		}
		if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
						OV5_FEAT(OV5_RADIX_GTSE))) {
			pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
		}
		/* Do radix anyway - the hypervisor said we had to */
		cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	} else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
		/* Hypervisor only supports hash - disable radix */
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
	}
}
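
/*
 * The OV5_INDX()/OV5_FEAT() macros (see asm/prom.h) split an
 * option-vector-5 descriptor into a byte index into vec5 and a bit mask
 * within that byte, so each check above reads one masked field out of
 * the flattened device tree property.
 */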

void __init mmu_early_init_devtree(void)
{
	/* Disable radix mode based on kernel command line. */
	if (disable_radix)
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

	/*
	 * Check /chosen/ibm,architecture-vec-5 if running as a guest.
	 * When running bare-metal, we can use radix if we like
	 * even though the ibm,architecture-vec-5 property created by
	 * skiboot doesn't have the necessary bits set.
	 */
	if (!(mfmsr() & MSR_HV))
		early_check_vec5();

	if (early_radix_enabled())
		radix__early_init_devtree();
	else
		hash__early_init_devtree();
}
#endif /* CONFIG_PPC_BOOK3S_64 */
409