// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space. One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once. PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped. (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/memblock.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"


LIST_HEAD(static_vmlist);

static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
			size_t size, unsigned int mtype)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;

		if (vm->phys_addr > paddr ||
			paddr + size - 1 > vm->phys_addr + vm->size - 1)
			continue;

		return svm;
	}

	return NULL;
}

struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;

		/* static_vmlist is ascending order */
		if (vm->addr > vaddr)
			break;

		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}

	return NULL;
}

void __init add_static_vm_early(struct static_vm *svm)
{
	struct static_vm *curr_svm;
	struct vm_struct *vm;
	void *vaddr;

	vm = &svm->vm;
	vm_area_add_early(vm);
	vaddr = vm->addr;

	list_for_each_entry(curr_svm, &static_vmlist, list) {
		vm = &curr_svm->vm;

		if (vm->addr > vaddr)
			break;
	}
	list_add_tail(&svm->list, &curr_svm->list);
}

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return vmap_page_range(virt, virt + PAGE_SIZE, phys,
			       __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

#ifdef CONFIG_KASAN
static unsigned long arm_kasan_mem_to_shadow(unsigned long addr)
{
	return (unsigned long)kasan_mem_to_shadow((void *)addr);
}
#else
static unsigned long arm_kasan_mem_to_shadow(unsigned long addr)
{
	return 0;
}
#endif

static void memcpy_pgd(struct mm_struct *mm, unsigned long start,
		       unsigned long end)
{
	end = ALIGN(end, PGDIR_SIZE);
	memcpy(pgd_offset(mm, start), pgd_offset_k(start),
	       sizeof(pgd_t) * (pgd_index(end) - pgd_index(start)));
}

void __check_vmalloc_seq(struct mm_struct *mm)
{
	int seq;

	do {
		seq = atomic_read_acquire(&init_mm.context.vmalloc_seq);
		memcpy_pgd(mm, VMALLOC_START, VMALLOC_END);
		if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
			unsigned long start =
				arm_kasan_mem_to_shadow(VMALLOC_START);
			unsigned long end =
				arm_kasan_mem_to_shadow(VMALLOC_END);
			memcpy_pgd(mm, start, end);
		}
		/*
		 * Use a store-release so that other CPUs that observe the
		 * counter's new value are guaranteed to see the results of the
		 * memcpy as well.
		 */
		atomic_set_release(&mm->context.vmalloc_seq, seq);
	} while (seq != atomic_read(&init_mm.context.vmalloc_seq));
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pmd_t *pmdp = pmd_off_k(addr);

	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the vmalloc sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			atomic_inc_return_release(&init_mm.context.vmalloc_seq);

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pmd_t *pmd = pmd_off_k(addr);

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pmd_t *pmd = pmd_off_k(addr);

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;
	phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
		struct static_vm *svm;

		svm = find_static_vm_paddr(paddr, size, mtype);
		if (svm) {
			addr = (unsigned long)svm->vm.addr;
			addr += paddr - svm->vm.phys_addr;
			return (void __iomem *) (offset + addr);
		}
	}

	/*
	 * Don't allow RAM to be mapped with mismatched attributes - this
	 * causes problems with ARMv6+
	 */
	if (WARN_ON(memblock_is_map_memory(PFN_PHYS(pfn)) &&
		    mtype != MT_MEMORY_RW))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	     cpu_is_xsc3()) && pfn >= 0x100000 &&
	    !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, paddr,
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	phys_addr_t last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed for reprogramming source
 * clocks that would affect normal memory for example. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY_RWX;
	else
		mtype = MT_MEMORY_RWX_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}

void __arm_iomem_set_ro(void __iomem *ptr, size_t size)
{
	set_memory_ro((unsigned long)ptr, PAGE_ALIGN(size) / PAGE_SIZE);
}

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (__force void *)arch_ioremap_caller(phys_addr, size,
						   MT_MEMORY_RW,
						   __builtin_return_address(0));
}

void iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct static_vm *svm;

	/* If this is a static mapping, we must leave it alone */
	svm = find_static_vm_vaddr(addr);
	if (svm)
		return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	{
		struct vm_struct *vm;

		vm = find_vm_area(addr);

		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
			unmap_area_sections((unsigned long)vm->addr, vm->size);
	}
#endif

	vunmap(addr);
}
EXPORT_SYMBOL(iounmap);

#if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
	pci_ioremap_mem_type = mem_type;
}

int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;

	if (!(res->flags & IORESOURCE_IO))
		return -EINVAL;

	if (res->end > IO_SPACE_LIMIT)
		return -EINVAL;

	return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
			       __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL(pci_remap_iospace);

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}

bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
				 unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	return memblock_is_map_memory(pfn);
}
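
/*
 * Illustrative sketch, not part of the original file: the header comment
 * above says remapped regions must only be accessed through readl()/writel()
 * style accessors. The function below shows, under that assumption, how a
 * driver would typically consume the ioremap()/iounmap() API implemented in
 * this file. The physical base address, the register offsets and all
 * example_*/EXAMPLE_* names are hypothetical and exist purely for
 * illustration.
 */
#define EXAMPLE_DEV_PHYS_BASE	0x40000000UL	/* hypothetical device base */
#define EXAMPLE_DEV_REG_ID	0x00		/* hypothetical ID register */
#define EXAMPLE_DEV_REG_CTRL	0x04		/* hypothetical control register */

static int __maybe_unused example_mmio_probe(void)
{
	void __iomem *regs;
	u32 id;

	/* Dynamically map one page of MMIO registers; NULL means failure. */
	regs = ioremap(EXAMPLE_DEV_PHYS_BASE, SZ_4K);
	if (!regs)
		return -ENOMEM;

	/* Access the remapped area only through MMIO accessors. */
	id = readl(regs + EXAMPLE_DEV_REG_ID);
	writel(0x1, regs + EXAMPLE_DEV_REG_CTRL);

	/*
	 * iounmap() leaves static mappings alone and tears down dynamic
	 * ones (including section mappings on !SMP/!LPAE builds).
	 */
	iounmap(regs);

	return id != 0xffffffff ? 0 : -ENODEV;
}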