// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC ioremap.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <asm/pgalloc.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/bug.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

extern int mem_init_done;

static unsigned int fixmaps_used __initdata;

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size)
{
	phys_addr_t p;
	unsigned long v;
	unsigned long offset, last_addr;
	struct vm_struct *area = NULL;

	/* Don't allow wraparound or zero size */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = addr & ~PAGE_MASK;
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - p;

	if (likely(mem_init_done)) {
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;
		v = (unsigned long)area->addr;
	} else {
		if ((fixmaps_used + (size >> PAGE_SHIFT)) > FIX_N_IOREMAPS)
			return NULL;
		v = fix_to_virt(FIX_IOREMAP_BEGIN + fixmaps_used);
		fixmaps_used += (size >> PAGE_SHIFT);
	}

	if (ioremap_page_range(v, v + size, p,
			__pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_CI))) {
		if (likely(mem_init_done))
			vfree(area->addr);
		else
			fixmaps_used -= (size >> PAGE_SHIFT);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)v);
}
EXPORT_SYMBOL(ioremap);

void iounmap(void *addr)
{
	/* If the page is from the fixmap pool then we just clear out
	 * the fixmap mapping.
	 */
	if (unlikely((unsigned long)addr > FIXADDR_START)) {
		/* This is a bit broken... we don't really know
		 * how big the area is so it's difficult to know
		 * how many fixed pages to invalidate...
		 * just flush tlb and hope for the best...
		 * consider this a FIXME
		 *
		 * Really we should be clearing out one or more page
		 * table entries for these virtual addresses so that
		 * future references cause a page fault... for now, we
		 * rely on two things:
		 *   i)  this code never gets called on known boards
		 *   ii) invalid accesses to the freed areas aren't made
		 */
		flush_tlb_all();
		return;
	}

	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(iounmap);
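/*
 * Illustrative usage sketch, not part of the original file: how a driver
 * would typically use ioremap()/iounmap() above.  The physical base
 * address, register offset and function name are invented for the
 * example; a real driver would get them from the device tree or platform
 * data.  Kept under "#if 0" so it is never compiled.
 */
#if 0
static u32 example_read_status(void)
{
	/*
	 * Map PAGE_SIZE bytes of a hypothetical device's register space,
	 * uncached.  The base is deliberately not page-aligned, so
	 * ioremap() rounds the mapping out to cover two pages and
	 * returns the page-aligned virtual address plus offset 0xc04.
	 */
	void __iomem *regs = ioremap(0x91000c04, PAGE_SIZE);
	u32 status;

	if (!regs)
		return 0;

	status = ioread32(regs + 0x10);	/* read a device register */

	/* iounmap() masks the offset back off before freeing the area. */
	iounmap(regs);
	return status;
}
#endif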
/*
 * OK, this one's a bit tricky... ioremap can get called before memory is
 * initialized (the early serial console does this) and will want to
 * allocate a page for its mapping.  No userspace pages will ever get
 * allocated before memory is initialized, so this applies only to kernel
 * pages.  In the event that this is called before memory is initialized,
 * we allocate the page using the memblock infrastructure.
 */
pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte;

	if (likely(mem_init_done)) {
		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
	} else {
		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
	}

	return pte;
}
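/*
 * Illustrative boot-ordering sketch, not part of the original file: the
 * early path that the mem_init_done tests above are guarding.  The UART
 * address and function name are invented for the example.  Kept under
 * "#if 0" so it is never compiled.
 */
#if 0
static void __init example_early_console_setup(void)
{
	/*
	 * Runs before the architecture code sets mem_init_done, so
	 * ioremap() hands out a fixmap slot instead of a vmalloc area,
	 * and any page table page it needs comes from
	 * pte_alloc_one_kernel()'s memblock_alloc() fallback.
	 */
	void __iomem *uart = ioremap(0x90000000, PAGE_SIZE);

	if (uart)
		iowrite8('!', uart);	/* poke the hypothetical UART */
}
#endif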