/*
 * arch/sh/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	struct vm_struct *area;
	unsigned long offset, last_addr, addr, orig_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * If we're on an SH7751 or SH7780 PCI controller, PCI memory is
	 * mapped at the end of the address space (typically 0xfd000000)
	 * in a non-translatable area, so mapping through page tables for
	 * this area is not only pointless, but also fundamentally
	 * broken. Just return the physical address instead.
	 *
	 * For boards that map a small PCI memory aperture somewhere in
	 * P1/P2 space, ioremap() will already do the right thing,
	 * and we'll never get this far.
	 */
	if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr))
		return (void __iomem *)phys_addr;

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory))
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;

#ifdef CONFIG_32BIT
	/*
	 * First try to remap through the PMB once a valid VMA has been
	 * established. Smaller allocations (or the rest of the size
	 * remaining after a PMB mapping due to the size not being
	 * perfectly aligned on a PMB size boundary) are then mapped
	 * through the UTLB using conventional page tables.
	 *
	 * PMB entries are all pre-faulted.
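	 *
	 * Only mappings of at least 16MB (0x1000000, the smallest PMB
	 * entry size) are candidates for a PMB mapping; anything smaller
	 * falls through to the page table path below.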
	 */
	if (unlikely(size >= 0x1000000)) {
		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);

		if (likely(mapped)) {
			addr += mapped;
			phys_addr += mapped;
			size -= mapped;
		}
	}
#endif

	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
	if (likely(size))
		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
			vunmap((void *)orig_addr);
			return NULL;
		}

	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap);

void __iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *p;

	if (PXSEG(vaddr) < P3SEG || is_pci_memaddr(vaddr))
		return;

#ifdef CONFIG_32BIT
	/*
	 * Purge any PMB entries that may have been established for this
	 * mapping, then proceed with conventional VMA teardown.
	 *
	 * XXX: Note that due to the way that remove_vm_area() does
	 * matching of the resultant VMA, we aren't able to fast-forward
	 * the address past the PMB space until the end of the VMA where
	 * the page tables reside. As such, unmap_vm_area() will be
	 * forced to linearly scan over the area until it finds the page
	 * tables where PTEs that need to be unmapped actually reside,
	 * which is far from optimal. Perhaps we need to use a separate
	 * VMA for the PMB mappings?
	 *	-- PFM.
	 */
	pmb_unmap(vaddr);
#endif

	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);
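
/*
 * Example usage (a sketch for illustration only; the physical address
 * and register offset below are placeholder values, not a real device):
 *
 *	void __iomem *regs = ioremap(0xfd000000, PAGE_SIZE);
 *
 *	if (regs) {
 *		u32 status = readl(regs + 0x04);
 *		iounmap(regs);
 *	}
 *
 * Drivers should use the ioremap()/iounmap() wrappers from <asm/io.h>
 * rather than calling __ioremap()/__iounmap() directly; the wrappers
 * already handle P1/P2 addresses without ever reaching this code.
 */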