xref: /linux/arch/powerpc/mm/init_64.c (revision cc3d2940133d24000e2866b21e03ce32adfead0a)
170d64ceaSPaul Mackerras /*
270d64ceaSPaul Mackerras  *  PowerPC version
370d64ceaSPaul Mackerras  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
470d64ceaSPaul Mackerras  *
570d64ceaSPaul Mackerras  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
670d64ceaSPaul Mackerras  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
770d64ceaSPaul Mackerras  *    Copyright (C) 1996 Paul Mackerras
870d64ceaSPaul Mackerras  *
970d64ceaSPaul Mackerras  *  Derived from "arch/i386/mm/init.c"
1070d64ceaSPaul Mackerras  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
1170d64ceaSPaul Mackerras  *
1270d64ceaSPaul Mackerras  *  Dave Engebretsen <engebret@us.ibm.com>
1370d64ceaSPaul Mackerras  *      Rework for PPC64 port.
1470d64ceaSPaul Mackerras  *
1570d64ceaSPaul Mackerras  *  This program is free software; you can redistribute it and/or
1670d64ceaSPaul Mackerras  *  modify it under the terms of the GNU General Public License
1770d64ceaSPaul Mackerras  *  as published by the Free Software Foundation; either version
1870d64ceaSPaul Mackerras  *  2 of the License, or (at your option) any later version.
1970d64ceaSPaul Mackerras  *
2070d64ceaSPaul Mackerras  */
2170d64ceaSPaul Mackerras 
22cec08e7aSBenjamin Herrenschmidt #undef DEBUG
23cec08e7aSBenjamin Herrenschmidt 
2470d64ceaSPaul Mackerras #include <linux/signal.h>
2570d64ceaSPaul Mackerras #include <linux/sched.h>
2670d64ceaSPaul Mackerras #include <linux/kernel.h>
2770d64ceaSPaul Mackerras #include <linux/errno.h>
2870d64ceaSPaul Mackerras #include <linux/string.h>
2970d64ceaSPaul Mackerras #include <linux/types.h>
3070d64ceaSPaul Mackerras #include <linux/mman.h>
3170d64ceaSPaul Mackerras #include <linux/mm.h>
3270d64ceaSPaul Mackerras #include <linux/swap.h>
3370d64ceaSPaul Mackerras #include <linux/stddef.h>
3470d64ceaSPaul Mackerras #include <linux/vmalloc.h>
3570d64ceaSPaul Mackerras #include <linux/init.h>
3670d64ceaSPaul Mackerras #include <linux/delay.h>
3770d64ceaSPaul Mackerras #include <linux/highmem.h>
3870d64ceaSPaul Mackerras #include <linux/idr.h>
3970d64ceaSPaul Mackerras #include <linux/nodemask.h>
4070d64ceaSPaul Mackerras #include <linux/module.h>
41c9cf5528SRandy Dunlap #include <linux/poison.h>
4295f72d1eSYinghai Lu #include <linux/memblock.h>
43a4fe3ce7SDavid Gibson #include <linux/hugetlb.h>
445a0e3ad6STejun Heo #include <linux/slab.h>
4518569c1fSPaul Mackerras #include <linux/of_fdt.h>
4618569c1fSPaul Mackerras #include <linux/libfdt.h>
4770d64ceaSPaul Mackerras 
4870d64ceaSPaul Mackerras #include <asm/pgalloc.h>
4970d64ceaSPaul Mackerras #include <asm/page.h>
5070d64ceaSPaul Mackerras #include <asm/prom.h>
5170d64ceaSPaul Mackerras #include <asm/rtas.h>
5270d64ceaSPaul Mackerras #include <asm/io.h>
5370d64ceaSPaul Mackerras #include <asm/mmu_context.h>
5470d64ceaSPaul Mackerras #include <asm/pgtable.h>
5570d64ceaSPaul Mackerras #include <asm/mmu.h>
567c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
5770d64ceaSPaul Mackerras #include <asm/smp.h>
5870d64ceaSPaul Mackerras #include <asm/machdep.h>
5970d64ceaSPaul Mackerras #include <asm/tlb.h>
6070d64ceaSPaul Mackerras #include <asm/eeh.h>
6170d64ceaSPaul Mackerras #include <asm/processor.h>
6270d64ceaSPaul Mackerras #include <asm/mmzone.h>
6370d64ceaSPaul Mackerras #include <asm/cputable.h>
6470d64ceaSPaul Mackerras #include <asm/sections.h>
6570d64ceaSPaul Mackerras #include <asm/iommu.h>
6670d64ceaSPaul Mackerras #include <asm/vdso.h>
67800fc3eeSDavid Gibson 
68800fc3eeSDavid Gibson #include "mmu_decl.h"
6970d64ceaSPaul Mackerras 
7094491685SBenjamin Herrenschmidt #ifdef CONFIG_PPC_STD_MMU_64
71dd1842a2SAneesh Kumar K.V #if H_PGTABLE_RANGE > USER_VSID_RANGE
7270d64ceaSPaul Mackerras #warning Limited user VSID range means pagetable space is wasted
7370d64ceaSPaul Mackerras #endif
7470d64ceaSPaul Mackerras 
75dd1842a2SAneesh Kumar K.V #if (TASK_SIZE_USER64 < H_PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
7670d64ceaSPaul Mackerras #warning TASK_SIZE is smaller than it needs to be.
7770d64ceaSPaul Mackerras #endif
7894491685SBenjamin Herrenschmidt #endif /* CONFIG_PPC_STD_MMU_64 */
7970d64ceaSPaul Mackerras 
8037dd2badSKumar Gala phys_addr_t memstart_addr = ~0;
8179c3095fSSonny Rao EXPORT_SYMBOL_GPL(memstart_addr);
8237dd2badSKumar Gala phys_addr_t kernstart_addr;
8379c3095fSSonny Rao EXPORT_SYMBOL_GPL(kernstart_addr);
84d7917ba7SKumar Gala 
85d29eff7bSAndy Whitcroft #ifdef CONFIG_SPARSEMEM_VMEMMAP
86d29eff7bSAndy Whitcroft /*
87d29eff7bSAndy Whitcroft  * Given an address within the vmemmap, determine the pfn of the page that
88d29eff7bSAndy Whitcroft  * represents the start of the section it is within.  Note that we have to
89d29eff7bSAndy Whitcroft  * do this by hand as the proffered address may not be correctly aligned.
90d29eff7bSAndy Whitcroft  * Subtraction of non-aligned pointers produces undefined results.
91d29eff7bSAndy Whitcroft  */
9209de9ff8SMichael Ellerman static unsigned long __meminit vmemmap_section_start(unsigned long page)
93d29eff7bSAndy Whitcroft {
94d29eff7bSAndy Whitcroft 	unsigned long offset = page - ((unsigned long)(vmemmap));
95d29eff7bSAndy Whitcroft 
96d29eff7bSAndy Whitcroft 	/* Return the pfn of the start of the section. */
97d29eff7bSAndy Whitcroft 	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
98d29eff7bSAndy Whitcroft }
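
/*
 * Worked example (illustrative numbers, not taken from this code):
 * assuming sizeof(struct page) == 64 and PAGES_PER_SECTION == 0x1000,
 * an address of vmemmap + 0x41080 gives offset / 64 == pfn 0x1042,
 * which PAGE_SECTION_MASK rounds down to 0x1000, the first pfn of
 * that section.
 */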
99d29eff7bSAndy Whitcroft 
100d29eff7bSAndy Whitcroft /*
101d29eff7bSAndy Whitcroft  * Check if this vmemmap page is already initialised.  If any section
102d29eff7bSAndy Whitcroft  * which overlaps this vmemmap page is initialised then this page is
103d29eff7bSAndy Whitcroft  * initialised already.
104d29eff7bSAndy Whitcroft  */
10509de9ff8SMichael Ellerman static int __meminit vmemmap_populated(unsigned long start, int page_size)
106d29eff7bSAndy Whitcroft {
107d29eff7bSAndy Whitcroft 	unsigned long end = start + page_size;
10816a05bffSLi Zhong 	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));
109d29eff7bSAndy Whitcroft 
110d29eff7bSAndy Whitcroft 	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
11116a05bffSLi Zhong 		if (pfn_valid(page_to_pfn((struct page *)start)))
112d29eff7bSAndy Whitcroft 			return 1;
113d29eff7bSAndy Whitcroft 
114d29eff7bSAndy Whitcroft 	return 0;
115d29eff7bSAndy Whitcroft }
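
/*
 * Note: the loop above advances in steps of one section's worth of
 * page structs (PAGES_PER_SECTION * sizeof(struct page)), probing
 * each section that overlaps the vmemmap page with pfn_valid().
 */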
116d29eff7bSAndy Whitcroft 
11791eea67cSMark Nelson struct vmemmap_backing *vmemmap_list;
118bd8cb03dSLi Zhong static struct vmemmap_backing *next;
119bd8cb03dSLi Zhong static int num_left;
120bd8cb03dSLi Zhong static int num_freed;
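
/*
 * Bookkeeping for vmemmap_list_alloc(): "next" does double duty,
 * pointing either at the chain of freed entries (num_freed of them)
 * or, once that chain is drained, at the next unused chunk of the
 * most recently allocated backing page (num_left chunks remain).
 */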
12191eea67cSMark Nelson 
12291eea67cSMark Nelson static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
12391eea67cSMark Nelson {
124bd8cb03dSLi Zhong 	struct vmemmap_backing *vmem_back;
125bd8cb03dSLi Zhong 	/* get from freed entries first */
126bd8cb03dSLi Zhong 	if (num_freed) {
127bd8cb03dSLi Zhong 		num_freed--;
128bd8cb03dSLi Zhong 		vmem_back = next;
129bd8cb03dSLi Zhong 		next = next->list;
130bd8cb03dSLi Zhong 
131bd8cb03dSLi Zhong 		return vmem_back;
132bd8cb03dSLi Zhong 	}
13391eea67cSMark Nelson 
13491eea67cSMark Nelson 	/* allocate a page when required and hand out chunks */
135bd8cb03dSLi Zhong 	if (!num_left) {
13691eea67cSMark Nelson 		next = vmemmap_alloc_block(PAGE_SIZE, node);
13791eea67cSMark Nelson 		if (unlikely(!next)) {
13891eea67cSMark Nelson 			WARN_ON(1);
13991eea67cSMark Nelson 			return NULL;
14091eea67cSMark Nelson 		}
14191eea67cSMark Nelson 		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
14291eea67cSMark Nelson 	}
14391eea67cSMark Nelson 
14491eea67cSMark Nelson 	num_left--;
14591eea67cSMark Nelson 
14691eea67cSMark Nelson 	return next++;
14791eea67cSMark Nelson }
14891eea67cSMark Nelson 
14991eea67cSMark Nelson static __meminit void vmemmap_list_populate(unsigned long phys,
15091eea67cSMark Nelson 					    unsigned long start,
15191eea67cSMark Nelson 					    int node)
15291eea67cSMark Nelson {
15391eea67cSMark Nelson 	struct vmemmap_backing *vmem_back;
15491eea67cSMark Nelson 
15591eea67cSMark Nelson 	vmem_back = vmemmap_list_alloc(node);
15691eea67cSMark Nelson 	if (unlikely(!vmem_back)) {
15791eea67cSMark Nelson 		WARN_ON(1);
15891eea67cSMark Nelson 		return;
15991eea67cSMark Nelson 	}
16091eea67cSMark Nelson 
16191eea67cSMark Nelson 	vmem_back->phys = phys;
16291eea67cSMark Nelson 	vmem_back->virt_addr = start;
16391eea67cSMark Nelson 	vmem_back->list = vmemmap_list;
16491eea67cSMark Nelson 
16591eea67cSMark Nelson 	vmemmap_list = vmem_back;
16691eea67cSMark Nelson }
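
/* Entries are pushed at the head, so vmemmap_list is newest-first. */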
16791eea67cSMark Nelson 
16871b0bfe4SLi Zhong int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
16971b0bfe4SLi Zhong {
17071b0bfe4SLi Zhong 	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
17171b0bfe4SLi Zhong 
17271b0bfe4SLi Zhong 	/* Align to the page size of the linear mapping. */
17371b0bfe4SLi Zhong 	start = _ALIGN_DOWN(start, page_size);
17471b0bfe4SLi Zhong 
17571b0bfe4SLi Zhong 	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
17671b0bfe4SLi Zhong 
17771b0bfe4SLi Zhong 	for (; start < end; start += page_size) {
17871b0bfe4SLi Zhong 		void *p;
1791dace6c6SDavid Gibson 		int rc;
18071b0bfe4SLi Zhong 
18171b0bfe4SLi Zhong 		if (vmemmap_populated(start, page_size))
18271b0bfe4SLi Zhong 			continue;
18371b0bfe4SLi Zhong 
18471b0bfe4SLi Zhong 		p = vmemmap_alloc_block(page_size, node);
18571b0bfe4SLi Zhong 		if (!p)
18671b0bfe4SLi Zhong 			return -ENOMEM;
18771b0bfe4SLi Zhong 
18871b0bfe4SLi Zhong 		vmemmap_list_populate(__pa(p), start, node);
18971b0bfe4SLi Zhong 
19071b0bfe4SLi Zhong 		pr_debug("      * %016lx..%016lx allocated at %p\n",
19171b0bfe4SLi Zhong 			 start, start + page_size, p);
19271b0bfe4SLi Zhong 
1931dace6c6SDavid Gibson 		rc = vmemmap_create_mapping(start, page_size, __pa(p));
1941dace6c6SDavid Gibson 		if (rc < 0) {
1951dace6c6SDavid Gibson 			pr_warning(
1961dace6c6SDavid Gibson 				"vmemmap_populate: Unable to create vmemmap mapping: %d\n",
1971dace6c6SDavid Gibson 				rc);
1981dace6c6SDavid Gibson 			return -EFAULT;
1991dace6c6SDavid Gibson 		}
20071b0bfe4SLi Zhong 	}
20171b0bfe4SLi Zhong 
20271b0bfe4SLi Zhong 	return 0;
20371b0bfe4SLi Zhong }
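
/*
 * Each page_size-sized stretch of the vmemmap is backed by a single
 * physically contiguous block; stretches that an initialised section
 * already covers are skipped, which lets neighbouring sections share
 * one backing page.
 */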
20471b0bfe4SLi Zhong 
20571b0bfe4SLi Zhong #ifdef CONFIG_MEMORY_HOTPLUG
206bd8cb03dSLi Zhong static unsigned long vmemmap_list_free(unsigned long start)
207bd8cb03dSLi Zhong {
208bd8cb03dSLi Zhong 	struct vmemmap_backing *vmem_back, *vmem_back_prev;
209bd8cb03dSLi Zhong 
210bd8cb03dSLi Zhong 	vmem_back_prev = vmem_back = vmemmap_list;
211bd8cb03dSLi Zhong 
212bd8cb03dSLi Zhong 	/* look for the entry, remembering its predecessor as we go */
213bd8cb03dSLi Zhong 	for (; vmem_back; vmem_back = vmem_back->list) {
214bd8cb03dSLi Zhong 		if (vmem_back->virt_addr == start)
215bd8cb03dSLi Zhong 			break;
216bd8cb03dSLi Zhong 		vmem_back_prev = vmem_back;
217bd8cb03dSLi Zhong 	}
218bd8cb03dSLi Zhong 
219bd8cb03dSLi Zhong 	if (unlikely(!vmem_back)) {
220bd8cb03dSLi Zhong 		WARN_ON(1);
221bd8cb03dSLi Zhong 		return 0;
222bd8cb03dSLi Zhong 	}
223bd8cb03dSLi Zhong 
224bd8cb03dSLi Zhong 	/* remove it from vmemmap_list */
225bd8cb03dSLi Zhong 	if (vmem_back == vmemmap_list) /* remove head */
226bd8cb03dSLi Zhong 		vmemmap_list = vmem_back->list;
227bd8cb03dSLi Zhong 	else
228bd8cb03dSLi Zhong 		vmem_back_prev->list = vmem_back->list;
229bd8cb03dSLi Zhong 
230bd8cb03dSLi Zhong 	/* push this freed entry onto the free chain headed by next */
231bd8cb03dSLi Zhong 	vmem_back->list = next;
232bd8cb03dSLi Zhong 	next = vmem_back;
233bd8cb03dSLi Zhong 	num_freed++;
234bd8cb03dSLi Zhong 
235bd8cb03dSLi Zhong 	return vmem_back->phys;
236bd8cb03dSLi Zhong }
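
/*
 * The entry unlinked above is parked on the free chain that
 * vmemmap_list_alloc() reuses; its phys address is returned so the
 * caller can release the backing memory itself.
 */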
237bd8cb03dSLi Zhong 
23871b0bfe4SLi Zhong void __ref vmemmap_free(unsigned long start, unsigned long end)
239d29eff7bSAndy Whitcroft {
240cec08e7aSBenjamin Herrenschmidt 	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
241d29eff7bSAndy Whitcroft 
242d29eff7bSAndy Whitcroft 	start = _ALIGN_DOWN(start, page_size);
243d29eff7bSAndy Whitcroft 
24471b0bfe4SLi Zhong 	pr_debug("vmemmap_free %lx...%lx\n", start, end);
24532a74949SBenjamin Herrenschmidt 
246d29eff7bSAndy Whitcroft 	for (; start < end; start += page_size) {
24771b0bfe4SLi Zhong 		unsigned long addr;
248d29eff7bSAndy Whitcroft 
24971b0bfe4SLi Zhong 		/*
25071b0bfe4SLi Zhong 		 * The section has already been marked invalid, so
25171b0bfe4SLi Zhong 		 * vmemmap_populated() returning true means some other
25271b0bfe4SLi Zhong 		 * section still lives in this page; skip it.
25371b0bfe4SLi Zhong 		 */
254d29eff7bSAndy Whitcroft 		if (vmemmap_populated(start, page_size))
255d29eff7bSAndy Whitcroft 			continue;
256d29eff7bSAndy Whitcroft 
25771b0bfe4SLi Zhong 		addr = vmemmap_list_free(start);
25871b0bfe4SLi Zhong 		if (addr) {
25971b0bfe4SLi Zhong 			struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
260d29eff7bSAndy Whitcroft 
26171b0bfe4SLi Zhong 			if (PageReserved(page)) {
26271b0bfe4SLi Zhong 				/* allocated from bootmem */
26371b0bfe4SLi Zhong 				if (page_size < PAGE_SIZE) {
26471b0bfe4SLi Zhong 					/*
26571b0bfe4SLi Zhong 					 * this shouldn't happen, but if it is
26671b0bfe4SLi Zhong 					 * the case, leave the memory there
26771b0bfe4SLi Zhong 					 */
26871b0bfe4SLi Zhong 					WARN_ON_ONCE(1);
26971b0bfe4SLi Zhong 				} else {
27071b0bfe4SLi Zhong 					unsigned int nr_pages =
27171b0bfe4SLi Zhong 						1 << get_order(page_size);
27271b0bfe4SLi Zhong 					while (nr_pages--)
27371b0bfe4SLi Zhong 						free_reserved_page(page++);
274d29eff7bSAndy Whitcroft 				}
27571b0bfe4SLi Zhong 			} else
27671b0bfe4SLi Zhong 				free_pages((unsigned long)(__va(addr)),
27771b0bfe4SLi Zhong 							get_order(page_size));
278d29eff7bSAndy Whitcroft 
27971b0bfe4SLi Zhong 			vmemmap_remove_mapping(start, page_size);
280d29eff7bSAndy Whitcroft 		}
2810197518cSTang Chen 	}
28271b0bfe4SLi Zhong }
28371b0bfe4SLi Zhong #endif
284f7e3334aSNathan Fontenot void register_page_bootmem_memmap(unsigned long section_nr,
285f7e3334aSNathan Fontenot 				  struct page *start_page, unsigned long size)
286f7e3334aSNathan Fontenot {
287f7e3334aSNathan Fontenot }
288cd3db0c4SBenjamin Herrenschmidt 
2898e0861faSAlexey Kardashevskiy /*
2908e0861faSAlexey Kardashevskiy  * We do not have access to the sparsemem vmemmap, so we fallback to
2918e0861faSAlexey Kardashevskiy  * We do not have access to the sparsemem vmemmap, so we fall back to
2928e0861faSAlexey Kardashevskiy  * the sake of crashdump. In the long run, we might want to maintain
2938e0861faSAlexey Kardashevskiy  * a tree if performance of that linear walk becomes a problem.
2948e0861faSAlexey Kardashevskiy  *
2958e0861faSAlexey Kardashevskiy  * realmode_pfn_to_page functions can fail due to:
2968e0861faSAlexey Kardashevskiy  * 1) As real sparsemem blocks do not lie in RAM contiguously (they
2978e0861faSAlexey Kardashevskiy  * are in a virtual address space which is not available in real mode),
2988e0861faSAlexey Kardashevskiy  * the requested page struct can be split between blocks so get_page/put_page
2998e0861faSAlexey Kardashevskiy  * may fail.
3008e0861faSAlexey Kardashevskiy  * 2) When huge pages are used, the get_page/put_page API will fail
3018e0861faSAlexey Kardashevskiy  * in real mode as the linked addresses in the page struct are virtual
3028e0861faSAlexey Kardashevskiy  * too.
3038e0861faSAlexey Kardashevskiy  */
3048e0861faSAlexey Kardashevskiy struct page *realmode_pfn_to_page(unsigned long pfn)
3058e0861faSAlexey Kardashevskiy {
3068e0861faSAlexey Kardashevskiy 	struct vmemmap_backing *vmem_back;
3078e0861faSAlexey Kardashevskiy 	struct page *page;
3088e0861faSAlexey Kardashevskiy 	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
3098e0861faSAlexey Kardashevskiy 	unsigned long pg_va = (unsigned long) pfn_to_page(pfn);
3108e0861faSAlexey Kardashevskiy 
3118e0861faSAlexey Kardashevskiy 	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
3128e0861faSAlexey Kardashevskiy 		if (pg_va < vmem_back->virt_addr)
3138e0861faSAlexey Kardashevskiy 			continue;
3148e0861faSAlexey Kardashevskiy 
315bd8cb03dSLi Zhong 		/* Entries can be freed from vmemmap_list, so check every one */
316bd8cb03dSLi Zhong 		if ((pg_va + sizeof(struct page)) <=
317bd8cb03dSLi Zhong 				(vmem_back->virt_addr + page_size)) {
3188e0861faSAlexey Kardashevskiy 			page = (struct page *) (vmem_back->phys + pg_va -
3198e0861faSAlexey Kardashevskiy 				vmem_back->virt_addr);
3208e0861faSAlexey Kardashevskiy 			return page;
3218e0861faSAlexey Kardashevskiy 		}
322bd8cb03dSLi Zhong 	}
3238e0861faSAlexey Kardashevskiy 
324bd8cb03dSLi Zhong 	/* The page struct is probably split between real backing pages */
3258e0861faSAlexey Kardashevskiy 	return NULL;
3268e0861faSAlexey Kardashevskiy }
3278e0861faSAlexey Kardashevskiy EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
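
/*
 * Note on the bounds test above: a match requires the whole struct page
 * to fit inside one backing block, i.e. virt_addr <= pg_va and
 * pg_va + sizeof(struct page) <= virt_addr + page_size.  A struct page
 * straddling two blocks matches no entry and NULL is returned.
 */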
3288e0861faSAlexey Kardashevskiy 
3298e0861faSAlexey Kardashevskiy #elif defined(CONFIG_FLATMEM)
3308e0861faSAlexey Kardashevskiy 
3318e0861faSAlexey Kardashevskiy struct page *realmode_pfn_to_page(unsigned long pfn)
3328e0861faSAlexey Kardashevskiy {
3338e0861faSAlexey Kardashevskiy 	struct page *page = pfn_to_page(pfn);
3348e0861faSAlexey Kardashevskiy 	return page;
3358e0861faSAlexey Kardashevskiy }
3368e0861faSAlexey Kardashevskiy EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
3378e0861faSAlexey Kardashevskiy 
3388e0861faSAlexey Kardashevskiy #endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
3391a01dc87SMichael Ellerman 
3401a01dc87SMichael Ellerman #ifdef CONFIG_PPC_STD_MMU_64
341c610ec60SMichael Ellerman static bool disable_radix;
342c610ec60SMichael Ellerman static int __init parse_disable_radix(char *p)
343c610ec60SMichael Ellerman {
344c610ec60SMichael Ellerman 	disable_radix = true;
345c610ec60SMichael Ellerman 	return 0;
346c610ec60SMichael Ellerman }
347c610ec60SMichael Ellerman early_param("disable_radix", parse_disable_radix);
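
/*
 * "disable_radix" on the kernel command line sets the flag above, which
 * mmu_early_init_devtree() below turns into a cleared radix feature bit.
 */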
348c610ec60SMichael Ellerman 
34918569c1fSPaul Mackerras /*
350*cc3d2940SPaul Mackerras  * If we're running under a hypervisor, we need to check the contents of
351*cc3d2940SPaul Mackerras  * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do
352*cc3d2940SPaul Mackerras  * radix.  If not, we clear the radix feature bit so we fall back to hash.
35318569c1fSPaul Mackerras  */
35418569c1fSPaul Mackerras static void early_check_vec5(void)
35518569c1fSPaul Mackerras {
35618569c1fSPaul Mackerras 	unsigned long root, chosen;
35718569c1fSPaul Mackerras 	int size;
35818569c1fSPaul Mackerras 	const u8 *vec5;
35918569c1fSPaul Mackerras 
36018569c1fSPaul Mackerras 	root = of_get_flat_dt_root();
36118569c1fSPaul Mackerras 	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
36218569c1fSPaul Mackerras 	if (chosen == -FDT_ERR_NOTFOUND)
36318569c1fSPaul Mackerras 		return;
36418569c1fSPaul Mackerras 	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
36518569c1fSPaul Mackerras 	if (!vec5)
36618569c1fSPaul Mackerras 		return;
367*cc3d2940SPaul Mackerras 	if (size <= OV5_INDX(OV5_MMU_RADIX_300) ||
368*cc3d2940SPaul Mackerras 	    !(vec5[OV5_INDX(OV5_MMU_RADIX_300)] & OV5_FEAT(OV5_MMU_RADIX_300)))
369*cc3d2940SPaul Mackerras 		/* Hypervisor doesn't support radix */
37018569c1fSPaul Mackerras 		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
37118569c1fSPaul Mackerras }
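
/*
 * OV5_INDX() and OV5_FEAT() map an option-vector-5 bit number to the
 * byte index and bit mask used above, so the test reads: is the byte
 * containing the radix bit present in the property, and is that bit set?
 */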
37218569c1fSPaul Mackerras 
3731a01dc87SMichael Ellerman void __init mmu_early_init_devtree(void)
3741a01dc87SMichael Ellerman {
375c610ec60SMichael Ellerman 	/* Disable radix mode based on kernel command line. */
376c610ec60SMichael Ellerman 	if (disable_radix)
3775a25b6f5SAneesh Kumar K.V 		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
378bacf9cf8SMichael Ellerman 
37918569c1fSPaul Mackerras 	/*
38018569c1fSPaul Mackerras 	 * Check /chosen/ibm,architecture-vec-5 if running as a guest.
38118569c1fSPaul Mackerras 	 * When running bare-metal, we can use radix if we like
38218569c1fSPaul Mackerras 	 * even though the ibm,architecture-vec-5 property created by
38318569c1fSPaul Mackerras 	 * skiboot doesn't have the necessary bits set.
38418569c1fSPaul Mackerras 	 */
38518569c1fSPaul Mackerras 	if (early_radix_enabled() && !(mfmsr() & MSR_HV))
38618569c1fSPaul Mackerras 		early_check_vec5();
38718569c1fSPaul Mackerras 
388b8f1b4f8SAneesh Kumar K.V 	if (early_radix_enabled())
3892537b09cSMichael Ellerman 		radix__early_init_devtree();
3902537b09cSMichael Ellerman 	else
391bacf9cf8SMichael Ellerman 		hash__early_init_devtree();
3921a01dc87SMichael Ellerman }
3931a01dc87SMichael Ellerman #endif /* CONFIG_PPC_STD_MMU_64 */
394