// SPDX-License-Identifier: GPL-2.0
/*
 * Provide common bits of early_ioremap() support for architectures needing
 * temporary mappings during boot before ioremap() is available.
 *
 * This is mostly a direct copy of the x86 early_ioremap implementation.
 *
 * (C) Copyright 1995 1996, 2014 Linus Torvalds
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/fixmap.h>
#include <asm/early_ioremap.h>

#ifdef CONFIG_MMU
static int early_ioremap_debug __initdata;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static int after_paging_init __initdata;

pgprot_t __init __weak early_memremap_pgprot_adjust(resource_size_t phys_addr,
						    unsigned long size,
						    pgprot_t prot)
{
	return prot;
}

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

/*
 * Generally, ioremap() is available after paging_init() has been called.
 * Architectures wanting to allow early_ioremap after paging_init() can
 * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
 */
#ifndef __late_set_fixmap
static inline void __init __late_set_fixmap(enum fixed_addresses idx,
					    phys_addr_t phys, pgprot_t prot)
{
	BUG();
}
#endif

#ifndef __late_clear_fixmap
static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
{
	BUG();
}
#endif

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_setup(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (WARN_ON(prev_map[i]))
			break;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
}

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (WARN(count, KERN_WARNING
		 "Debug warning: early ioremap leak of %d areas detected.\n"
		 "Please boot with early_ioremap_debug and report the dmesg.\n",
		 count))
		return 1;
	return 0;
}
late_initcall(check_early_ioremap_leak);
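/*
 * Layout sketch for the slot bookkeeping above (illustrative; the constants
 * are arch-defined, and NR_FIX_BTMAPS == 64 with FIX_BTMAPS_SLOTS == 8 is
 * assumed here only as an example, matching x86): each slot owns
 * NR_FIX_BTMAPS consecutive fixmap entries, handed out in descending index
 * order, so slot i starts at fixmap index FIX_BTMAP_BEGIN - NR_FIX_BTMAPS * i
 * and can map at most NR_FIX_BTMAPS * PAGE_SIZE bytes (256 KiB with 4 KiB
 * pages). Since fix_to_virt() conventionally maps lower indices to higher
 * virtual addresses, the --idx in the loops below advances the mapping
 * upward through the slot's virtual range one page at a time.
 */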
static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	WARN_ON(system_state >= SYSTEM_RUNNING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%pa, %08lx): no free slot\n",
		 __func__, &phys_addr, size))
		return NULL;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (WARN_ON(!size || last_addr < phys_addr))
		return NULL;

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = offset_in_page(phys_addr);
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (WARN_ON(nrpages > NR_FIX_BTMAPS))
		return NULL;

	WARN(early_ioremap_debug, "%s(%pa, %08lx) [%d] => %08lx + %08lx\n",
	     __func__, &phys_addr, size, slot, offset, slot_virt[slot]);

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_set_fixmap(idx, phys_addr, prot);
		else
			__early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%p, %08lx): slot not found\n",
		 __func__, addr, size))
		return;

	if (WARN(prev_size[slot] != size,
		 "%s(%p, %08lx) [%d]: size does not match mapped size %08lx\n",
		 __func__, addr, size, slot, prev_size[slot]))
		return;

	WARN(early_ioremap_debug, "%s(%p, %08lx) [%d]\n",
	     __func__, addr, size, slot);

	virt_addr = (unsigned long)addr;
	if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
		return;

	offset = offset_in_page(virt_addr);
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_clear_fixmap(idx);
		else
			__early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
						     FIXMAP_PAGE_NORMAL);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}

#ifdef FIXMAP_PAGE_RO
void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
						     FIXMAP_PAGE_RO);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}
#endif

#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
void __init *
early_memremap_prot(resource_size_t phys_addr, unsigned long size,
		    unsigned long prot_val)
{
	return (__force void *)__early_ioremap(phys_addr, size,
					       __pgprot(prot_val));
}
#endif

#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)

void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
{
	unsigned long slop, clen;
	char *p;

	while (size) {
		slop = offset_in_page(src);
		clen = size;
		if (clen > MAX_MAP_CHUNK - slop)
			clen = MAX_MAP_CHUNK - slop;
		p = early_memremap(src & PAGE_MASK, clen + slop);
		memcpy(dest, p + slop, clen);
		early_memunmap(p, clen + slop);
		dest += clen;
		src += clen;
		size -= clen;
	}
}
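/*
 * Worked example for the chunking loop above (illustrative only; assumes
 * 4 KiB pages and NR_FIX_BTMAPS == 64, so MAX_MAP_CHUNK is 256 KiB):
 * copying size == 300 KiB from src == 0x1000100 gives slop == 0x100, so
 * the first pass maps the page-aligned chunk at 0x1000000 and copies
 * clen == 256 KiB - 0x100 bytes; src then lands page-aligned at 0x1040000
 * (slop == 0) and the second pass copies the remaining 44 KiB + 0x100
 * bytes with a single further mapping.
 */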
#else /* CONFIG_MMU */

void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void __iomem *)phys_addr;
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
}

#endif /* CONFIG_MMU */

void __init early_memunmap(void *addr, unsigned long size)
{
	early_iounmap((__force void __iomem *)addr, size);
}
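/*
 * Illustrative usage sketch (not part of this file; parse_boot_table() and
 * struct boot_table are hypothetical): boot code brackets a short-lived
 * access with early_memremap()/early_memunmap(), before ioremap() is usable:
 *
 *	void __init parse_boot_table(phys_addr_t phys)
 *	{
 *		struct boot_table *tbl;
 *
 *		tbl = early_memremap(phys, sizeof(*tbl));
 *		if (!tbl)
 *			return;
 *		... read the fields needed during early boot ...
 *		early_memunmap(tbl, sizeof(*tbl));
 *	}
 */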