xref: /linux/mm/early_ioremap.c (revision 7b69d79f94d42ac26a5397a07b9d78b066c400aa)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
29e5c33d7SMark Salter /*
39e5c33d7SMark Salter  * Provide common bits of early_ioremap() support for architectures needing
49e5c33d7SMark Salter  * temporary mappings during boot before ioremap() is available.
59e5c33d7SMark Salter  *
69e5c33d7SMark Salter  * This is mostly a direct copy of the x86 early_ioremap implementation.
79e5c33d7SMark Salter  *
89e5c33d7SMark Salter  * (C) Copyright 1995 1996, 2014 Linus Torvalds
99e5c33d7SMark Salter  *
109e5c33d7SMark Salter  */
119e5c33d7SMark Salter #include <linux/kernel.h>
129e5c33d7SMark Salter #include <linux/init.h>
139e5c33d7SMark Salter #include <linux/io.h>
149e5c33d7SMark Salter #include <linux/module.h>
159e5c33d7SMark Salter #include <linux/slab.h>
169e5c33d7SMark Salter #include <linux/mm.h>
179e5c33d7SMark Salter #include <linux/vmalloc.h>
189e5c33d7SMark Salter #include <asm/fixmap.h>
194f1af60bSArd Biesheuvel #include <asm/early_ioremap.h>
209e5c33d7SMark Salter 
219e5c33d7SMark Salter #ifdef CONFIG_MMU
229e5c33d7SMark Salter static int early_ioremap_debug __initdata;
239e5c33d7SMark Salter 
249e5c33d7SMark Salter static int __init early_ioremap_debug_setup(char *str)
259e5c33d7SMark Salter {
269e5c33d7SMark Salter 	early_ioremap_debug = 1;
279e5c33d7SMark Salter 
289e5c33d7SMark Salter 	return 0;
299e5c33d7SMark Salter }
309e5c33d7SMark Salter early_param("early_ioremap_debug", early_ioremap_debug_setup);
319e5c33d7SMark Salter 
329e5c33d7SMark Salter static int after_paging_init __initdata;
339e5c33d7SMark Salter 
348f716c9bSTom Lendacky pgprot_t __init __weak early_memremap_pgprot_adjust(resource_size_t phys_addr,
358f716c9bSTom Lendacky 						    unsigned long size,
368f716c9bSTom Lendacky 						    pgprot_t prot)
378f716c9bSTom Lendacky {
388f716c9bSTom Lendacky 	return prot;
398f716c9bSTom Lendacky }
408f716c9bSTom Lendacky 
419e5c33d7SMark Salter void __init __weak early_ioremap_shutdown(void)
429e5c33d7SMark Salter {
439e5c33d7SMark Salter }
449e5c33d7SMark Salter 
/*
 * Called once paging_init() is complete: give the arch a chance to clean
 * up (early_ioremap_shutdown), then route all subsequent early mappings
 * through the __late_set_fixmap()/__late_clear_fixmap() paths.
 */
void __init early_ioremap_reset(void)
{
	early_ioremap_shutdown();
	after_paging_init = 1;
}
509e5c33d7SMark Salter 
519e5c33d7SMark Salter /*
529e5c33d7SMark Salter  * Generally, ioremap() is available after paging_init() has been called.
539e5c33d7SMark Salter  * Architectures wanting to allow early_ioremap after paging_init() can
549e5c33d7SMark Salter  * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
559e5c33d7SMark Salter  */
#ifndef __late_set_fixmap
/* No arch override: mapping after paging_init() is a hard error here. */
static inline void __init __late_set_fixmap(enum fixed_addresses idx,
					    phys_addr_t phys, pgprot_t prot)
{
	BUG();
}
#endif
639e5c33d7SMark Salter 
#ifndef __late_clear_fixmap
/* No arch override: unmapping after paging_init() is a hard error here. */
static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
{
	BUG();
}
#endif
709e5c33d7SMark Salter 
719e5c33d7SMark Salter static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
729e5c33d7SMark Salter static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
739e5c33d7SMark Salter static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
749e5c33d7SMark Salter 
759e5c33d7SMark Salter void __init early_ioremap_setup(void)
769e5c33d7SMark Salter {
779e5c33d7SMark Salter 	int i;
789e5c33d7SMark Salter 
799e5c33d7SMark Salter 	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
809e5c33d7SMark Salter 		if (WARN_ON(prev_map[i]))
819e5c33d7SMark Salter 			break;
829e5c33d7SMark Salter 
839e5c33d7SMark Salter 	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
849e5c33d7SMark Salter 		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
859e5c33d7SMark Salter }
869e5c33d7SMark Salter 
879e5c33d7SMark Salter static int __init check_early_ioremap_leak(void)
889e5c33d7SMark Salter {
899e5c33d7SMark Salter 	int count = 0;
909e5c33d7SMark Salter 	int i;
919e5c33d7SMark Salter 
929e5c33d7SMark Salter 	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
939e5c33d7SMark Salter 		if (prev_map[i])
949e5c33d7SMark Salter 			count++;
959e5c33d7SMark Salter 
969e5c33d7SMark Salter 	if (WARN(count, KERN_WARNING
979e5c33d7SMark Salter 		 "Debug warning: early ioremap leak of %d areas detected.\n"
989e5c33d7SMark Salter 		 "please boot with early_ioremap_debug and report the dmesg.\n",
999e5c33d7SMark Salter 		 count))
1009e5c33d7SMark Salter 		return 1;
1019e5c33d7SMark Salter 	return 0;
1029e5c33d7SMark Salter }
1039e5c33d7SMark Salter late_initcall(check_early_ioremap_leak);
1049e5c33d7SMark Salter 
/*
 * Map [phys_addr, phys_addr + size) into a free FIX_BTMAP fixmap slot
 * with the given protection and return the offset-adjusted virtual
 * address, or NULL on failure (no free slot, zero size, wraparound, or
 * request larger than one slot's NR_FIX_BTMAPS pages).  Before
 * paging_init() the pages are installed with __early_set_fixmap();
 * afterwards (after_paging_init set) via the arch's __late_set_fixmap().
 */
static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	/* early mappings are a boot-time facility only */
	WARN_ON(system_state >= SYSTEM_RUNNING);

	/* find the first free slot (NULL entry in prev_map[]) */
	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%pa, %08lx) not found slot\n",
		 __func__, &phys_addr, size))
		return NULL;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (WARN_ON(!size || last_addr < phys_addr))
		return NULL;

	/* remember the caller's (unaligned) size so early_iounmap() can
	 * cross-check it */
	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = offset_in_page(phys_addr);
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (WARN_ON(nrpages > NR_FIX_BTMAPS))
		return NULL;

	/*
	 * Ok, go for it..
	 */
	/* fixmap indices count downward, hence the subtraction and --idx */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_set_fixmap(idx, phys_addr, prot);
		else
			__early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	WARN(early_ioremap_debug, "%s(%pa, %08lx) [%d] => %08lx + %08lx\n",
	     __func__, &phys_addr, size, slot, offset, slot_virt[slot]);

	/* publish the cookie: marks the slot busy and is the caller's ptr */
	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}
1679e5c33d7SMark Salter 
/*
 * Tear down a mapping created by __early_ioremap(): addr must be a
 * pointer previously returned by early_ioremap()/early_memremap() and
 * size must match the original request exactly, otherwise the call is
 * rejected with a warning.  Clears the slot's fixmap entries and frees
 * the slot.
 */
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	/* find the slot whose published cookie matches addr */
	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "early_iounmap(%p, %08lx) not found slot\n",
		 addr, size))
		return;

	/* size must equal what was passed to the matching map call */
	if (WARN(prev_size[slot] != size,
		 "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
		 addr, size, slot, prev_size[slot]))
		return;

	WARN(early_ioremap_debug, "early_iounmap(%p, %08lx) [%d]\n",
	     addr, size, slot);

	virt_addr = (unsigned long)addr;
	if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
		return;

	offset = offset_in_page(virt_addr);
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	/* walk the slot's fixmap entries downward, mirroring the map path */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_clear_fixmap(idx);
		else
			__early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
		--idx;
		--nrpages;
	}
	/* slot is free for reuse */
	prev_map[slot] = NULL;
}
2149e5c33d7SMark Salter 
/* Remap an IO device: map with device (FIXMAP_PAGE_IO) protections. */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
}
2219e5c33d7SMark Salter 
2229e5c33d7SMark Salter /* Remap memory */
2239e5c33d7SMark Salter void __init *
2249e5c33d7SMark Salter early_memremap(resource_size_t phys_addr, unsigned long size)
2259e5c33d7SMark Salter {
2268f716c9bSTom Lendacky 	pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
2279e5c33d7SMark Salter 						     FIXMAP_PAGE_NORMAL);
2288f716c9bSTom Lendacky 
2298f716c9bSTom Lendacky 	return (__force void *)__early_ioremap(phys_addr, size, prot);
2309e5c33d7SMark Salter }
#ifdef FIXMAP_PAGE_RO
/* Remap normal memory read-only, with arch protection adjustment. */
void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t adjusted;

	adjusted = early_memremap_pgprot_adjust(phys_addr, size,
						FIXMAP_PAGE_RO);
	return (__force void *)__early_ioremap(phys_addr, size, adjusted);
}
#endif
2416b0f68e3SMark Salter 
#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
/*
 * Remap memory with caller-supplied raw protection bits.  Note: unlike
 * early_memremap(), prot_val is used as-is, with no arch adjustment.
 */
void __init *
early_memremap_prot(resource_size_t phys_addr, unsigned long size,
		    unsigned long prot_val)
{
	pgprot_t prot = __pgprot(prot_val);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}
#endif
251f88a68faSTom Lendacky 
2526b0f68e3SMark Salter #define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)
2536b0f68e3SMark Salter 
2546b0f68e3SMark Salter void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
2556b0f68e3SMark Salter {
2566b0f68e3SMark Salter 	unsigned long slop, clen;
2576b0f68e3SMark Salter 	char *p;
2586b0f68e3SMark Salter 
2596b0f68e3SMark Salter 	while (size) {
2605d57b014SAlexander Kuleshov 		slop = offset_in_page(src);
2616b0f68e3SMark Salter 		clen = size;
2626b0f68e3SMark Salter 		if (clen > MAX_MAP_CHUNK - slop)
2636b0f68e3SMark Salter 			clen = MAX_MAP_CHUNK - slop;
2646b0f68e3SMark Salter 		p = early_memremap(src & PAGE_MASK, clen + slop);
2656b0f68e3SMark Salter 		memcpy(dest, p + slop, clen);
2666b0f68e3SMark Salter 		early_memunmap(p, clen + slop);
2676b0f68e3SMark Salter 		dest += clen;
2686b0f68e3SMark Salter 		src += clen;
2696b0f68e3SMark Salter 		size -= clen;
2706b0f68e3SMark Salter 	}
2716b0f68e3SMark Salter }
2726b0f68e3SMark Salter 
2739e5c33d7SMark Salter #else /* CONFIG_MMU */
2749e5c33d7SMark Salter 
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	/* !CONFIG_MMU: physical == virtual, just cast the address through */
	return (__force void __iomem *)phys_addr;
}
2809e5c33d7SMark Salter 
/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	/* !CONFIG_MMU: identity mapping, nothing to set up */
	return (void *)phys_addr;
}
void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	/* !CONFIG_MMU: identity mapping; no protection bits are applied */
	return (void *)phys_addr;
}
2929e5c33d7SMark Salter 
/* Nothing was mapped, so there is nothing to undo without an MMU. */
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
}
2969e5c33d7SMark Salter 
2979e5c33d7SMark Salter #endif /* CONFIG_MMU */
2989e5c33d7SMark Salter 
2999e5c33d7SMark Salter 
/*
 * Counterpart of early_memremap()/early_memremap_ro(): identical to
 * early_iounmap() modulo the __iomem address-space qualifier.
 */
void __init early_memunmap(void *addr, unsigned long size)
{
	early_iounmap((__force void __iomem *)addr, size);
}
304