/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
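/*
 * Example (illustrative only, never compiled): a minimal sketch of how a
 * hypothetical driver would use this file's interface -- map a region of
 * bus memory with ioremap(), touch it only through readl()/writel() as
 * the comment above prescribes, then tear it down with iounmap().  The
 * physical address and register offsets below are made up.  Note that the
 * base address need not be page-aligned: __ioremap() folds the sub-page
 * offset into the cookie it returns (see the NOTE on __ioremap() below).
 */
#if 0
static int example_init(void)
{
	void __iomem *regs;

	/* Hypothetical device registers at a non-page-aligned address. */
	regs = ioremap(0x40000100, 0x100);
	if (!regs)
		return -ENOMEM;

	writel(1, regs + 0x0);		/* hypothetical control register */
	(void) readl(regs + 0x4);	/* hypothetical status register */

	iounmap(regs);
	return 0;
}
#endif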
static inline void
remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
	       unsigned long phys_addr, pgprot_t pgprot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	BUG_ON(address >= end);
	do {
		if (!pte_none(*pte))
			goto bad;

		set_pte(pte, pfn_pte(phys_addr >> PAGE_SHIFT, pgprot));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	return;

 bad:
	printk("remap_area_pte: page already exists\n");
	BUG();
}

static inline int
remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
	       unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	pgprot_t pgprot;

	address &= ~PGDIR_MASK;
	end = address + size;

	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	phys_addr -= address;
	BUG_ON(address >= end);

	pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, pgprot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

static int
remap_area_pages(unsigned long start, unsigned long phys_addr,
		 unsigned long size, unsigned long flags)
{
	unsigned long address = start;
	unsigned long end = start + size;
	int err = 0;
	pgd_t *dir;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	BUG_ON(address >= end);
	do {
		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
		if (!pmd) {
			err = -ENOMEM;
			break;
		}
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags)) {
			err = -ENOMEM;
			break;
		}

		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));

	flush_cache_vmap(start, end);
	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'flags' are the extra L_PTE_ flags that you want to specify for this
 * mapping.  See include/asm-arm/proc-armv/pgtable.h for more information.
 */
void __iomem *
__ioremap(unsigned long phys_addr, size_t size, unsigned long flags,
	  unsigned long align)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);

void __iounmap(void __iomem *addr)
{
	vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(__iounmap);

#ifdef __io
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	return __io(port);
}
EXPORT_SYMBOL(ioport_map);

void ioport_unmap(void __iomem *addr)
{
}
EXPORT_SYMBOL(ioport_unmap);
#endif

#ifdef CONFIG_PCI
#include <linux/pci.h>
#include <linux/ioport.h>

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	unsigned long start = pci_resource_start(dev, bar);
	unsigned long len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (!len || !start)
		return NULL;
	if (maxlen && len > maxlen)
		len = maxlen;
	if (flags & IORESOURCE_IO)
		return ioport_map(start, len);
	if (flags & IORESOURCE_MEM) {
		if (flags & IORESOURCE_CACHEABLE)
			return ioremap(start, len);
		return ioremap_nocache(start, len);
	}
	return NULL;
}
EXPORT_SYMBOL(pci_iomap);

void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	if ((unsigned long)addr >= VMALLOC_START &&
	    (unsigned long)addr < VMALLOC_END)
		iounmap(addr);
}
EXPORT_SYMBOL(pci_iounmap);
#endif
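/*
 * Example (illustrative only, never compiled): a minimal sketch of a
 * hypothetical PCI driver using pci_iomap()/pci_iounmap() above.  The
 * BAR number and register offset are made-up placeholders.  Passing
 * maxlen == 0 maps the whole resource, per the length clamping in
 * pci_iomap() above.
 */
#if 0
static int example_pci_setup(struct pci_dev *dev)
{
	void __iomem *base;

	/* Map all of BAR 0; pci_iomap() picks port vs. memory space. */
	base = pci_iomap(dev, 0, 0);
	if (!base)
		return -ENOMEM;

	writel(0, base);	/* hypothetical reset register */

	pci_iounmap(dev, base);
	return 0;
}
#endif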